content (stringlengths 86-88.9k) | title (stringlengths 0-150) | question (stringlengths 1-35.8k) | answers (sequence) | answers_scores (sequence) | non_answers (sequence) | non_answers_scores (sequence) | tags (sequence) | name (stringlengths 30-130)
---|---|---|---|---|---|---|---|---|
Q:
get key from a javascript object with minimum value
I am trying to get the key from a JavaScript object that has the minimum value.
var myobj = {"1632":45,"1856":12,"1848":56,"1548":34,"1843":88,"1451":55,"4518":98,"1818":23,"3458":45,"1332":634,"4434":33};
I have to get the key which has the minimum value, i.e.:
1856
I am trying hard to get it; I am new to object manipulation.
A:
Short and Sweet :
let key = Object.keys(obj).reduce((key, v) => obj[v] < obj[key] ? v : key);
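For reference, a quick usage sketch with the object from the question (assuming obj here is simply the question's myobj):
const obj = {"1632":45,"1856":12,"1848":56,"1548":34,"1843":88,"1451":55,"4518":98,"1818":23,"3458":45,"1332":634,"4434":33};
const key = Object.keys(obj).reduce((k, v) => obj[v] < obj[k] ? v : k);
console.log(key); // "1856"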
A:
You could use Array#reduce.
var object = { "1632": 45, "1856": 12, "1848": 56, "1548": 34, "1843": 88, "1451": 55, "4518": 98, "1818": 23, "3458": 45, "1332": 634, "4434": 33 },
key = Object.keys(object).reduce(function (r, a, i) {
return !i || +object[a] < +object[r] ? a : r;
}, undefined);
console.log(key);
A:
Iterate over the object properties and get key based on min value.
var myjson = {
"1632": 45,
"1856": 12,
"1848": 56,
"1548": 34,
"1843": 88,
"1451": 55,
"4518": 98,
"1818": 23,
"3458": 45,
"1332": 634,
"4434": 33
};
// get object keys array
var keys = Object.keys(myjson),
// set initial value as first element in array
res = keys[0];
// iterate over array elements
keys.forEach(function(v) {
// compare with current property value and update with the min value property
res = +myjson[res] > +myjson[v] ? v : res;
});
console.log(res);
A:
Learner's approach by for-looping:
var myobj = {"1632":45,"1856":12,"1848":56,"1548":34,"1843":88,"1451":55,"4518":98,"1818":23,"3458":45,"1332":634,"4434":33};
// Get the keys of myobj so we can iterate through it
var keys = Object.keys(myobj);
// Iterate through all the key values
var minimumKey = keys[0];
for(var i = 1; i < keys.length; i++){
var minimum = myobj[minimumKey];
var value = myobj[keys[i]];
if(minimum > value) minimumKey = keys[i];
}
console.log(minimumKey, myobj[minimumKey]);
A more functional approach:
var myobj = {"1632":45,"1856":12,"1848":56,"1548":34,"1843":88,"1451":55,"4518":98,"1818":23,"3458":45,"1332":634,"4434":33};
var minimum = Object.keys(myobj).map(function(key){
return {
"key": key,
"value": myobj[key]
}
}).sort(function(a, b){
return a.value - b.value
})[0];
console.log(minimum);
console.log(minimum.key);
console.log(minimum.value);
A:
You may try this:
var xx={"1632":45,"1856":12,"1848":56,"1548":34,"1843":88,"1451":55,"4518":98,"1818":23,"3458":45,"1332":634,"4434":33};
var z=_.keys(_.pick(xx, function(value, key, object) {
return (value==_.min(_.values(xx)));
}))[0];
document.getElementById("my_div").innerHTML=z;
<script src="http://underscorejs.org/underscore-min.js"></script>
<div id="my_div"> </div>
A third-party library, underscore.js, has been used; you should try it:
http://underscorejs.org/underscore-min.js
A:
What I did was iterate over the map, and if the value is lower than the current answer, then I overwrite the answer (100000 was the maximum value possible):
let answer = [100000, 100000]
const mapIterator = mapObject[Symbol.iterator]();
for (const item of mapIterator) {
if (item[1] < answer[1]) answer = item
}
console.log(answer);
Map iterator docs: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Map/@@iterator
| get key from a javascript object with minimum value | I am trying to get key from javascript object having a minium value.
var myobj = {"1632":45,"1856":12,"1848":56,"1548":34,"1843":88,"1451":55,"4518":98,"1818":23,"3458":45,"1332":634,"4434":33};
i have to get the key which having minimum value. i.e:
1856
trying hard to get. i am new with object manipulation.
| [
"Short and Sweet :\nlet key = Object.keys(obj).reduce((key, v) => obj[v] < obj[key] ? v : key);\n\n",
"Cou could use Array#reduce.\n\n\nvar object = { \"1632\": 45, \"1856\": 12, \"1848\": 56, \"1548\": 34, \"1843\": 88, \"1451\": 55, \"4518\": 98, \"1818\": 23, \"3458\": 45, \"1332\": 634, \"4434\": 33 },\r\n key = Object.keys(object).reduce(function (r, a, i) {\r\n return !i || +object[a] < +object[r] ? a : r;\r\n }, undefined);\r\n\r\nconsole.log(key);\n\n\n\n",
"Iterate over the object properties and get key based on min value.\n\n\nvar myjson = {\r\n \"1632\": 45,\r\n \"1856\": 12,\r\n \"1848\": 56,\r\n \"1548\": 34,\r\n \"1843\": 88,\r\n \"1451\": 55,\r\n \"4518\": 98,\r\n \"1818\": 23,\r\n \"3458\": 45,\r\n \"1332\": 634,\r\n \"4434\": 33\r\n};\r\n\r\n// get object keys array\r\nvar keys = Object.keys(myjson),\r\n // set initial value as first elemnt in array\r\n res = keys[0];\r\n\r\n// iterate over array elements\r\nkeys.forEach(function(v) {\r\n // compare with current property value and update with the min value property\r\n res = +myjson[res] > +myjson[v] ? v : res;\r\n});\r\n\r\nconsole.log(res);\n\n\n\n",
"Learner's approach by for-looping:\nvar myobj = {\"1632\":45,\"1856\":12,\"1848\":56,\"1548\":34,\"1843\":88,\"1451\":55,\"4518\":98,\"1818\":23,\"3458\":45,\"1332\":634,\"4434\":33};\n\n// Get the keys of myobj so we can iterate through it\nvar keys = Object.keys(myobj);\n\n// Iterate through all the key values\nvar minimumKey = keys[0];\nfor(var i = 1; i < keys.length; i++){\n var minimum = myobj[minimumKey];\n var value = myobj[keys[i]];\n if(minimum > value) minimumKey = keys[i];\n}\n\nconsole.log(minimumKey, myobj[minimumKey]);\n\nA more functional approach:\nvar myobj = {\"1632\":45,\"1856\":12,\"1848\":56,\"1548\":34,\"1843\":88,\"1451\":55,\"4518\":98,\"1818\":23,\"3458\":45,\"1332\":634,\"4434\":33};\n\nvar minimum = Object.keys(myobj).map(function(key){\n return {\n \"key\": key,\n \"value\": myobj[key]\n }\n}).sort(function(a, b){\n return a.value - b.value\n})[0];\n\nconsole.log(minimum);\nconsole.log(minimum.key);\nconsole.log(minimum.value);\n\n",
"You may try this:\n\n\nvar xx={\"1632\":45,\"1856\":12,\"1848\":56,\"1548\":34,\"1843\":88,\"1451\":55,\"4518\":98,\"1818\":23,\"3458\":45,\"1332\":634,\"4434\":33};\r\n\r\n\r\nvar z=_.keys(_.pick(xx, function(value, key, object) {\r\nreturn (value==_.min(_.values(xx))); \r\n}))[0];\r\n\r\ndocument.getElementById(\"my_div\").innerHTML=z;\n<script src=\"http://underscorejs.org/underscore-min.js\"></script>\r\n\r\n<div id=\"my_div\"> </div>\n\n\n\nA 3rd party library underscore.js has been used. you should try it:\nhttp://underscorejs.org/underscore-min.js\n",
"What I did, was iterate over the map, and if the value is lower than your current answer, then I overwrite the answer (100000 was the max value possible):\nlet answer = [100000, 100000]\n\nconst mapIterator = mapObject[Symbol.iterator]();\n\nfor (const item of mapIterator) {\n if (item[1] < answer[1]) answer = item\n}\nconsole.log(answer);\n\nMap iterator docs: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Map/@@iterator\n"
] | [
11,
2,
2,
1,
0,
0
] | [] | [] | [
"javascript"
] | stackoverflow_0038008307_javascript.txt |
Q:
Exact value of a root on Python
I'm writing a programme that converts complex numbers.
Right now I'm having problems with this piece of code:
import numpy
complexnr = 1+1j
mod= numpy.absolute(complexnr)
print(mod)
The output of this code is:
1.4142135623730951
I would like to get √2 as the output.
I have been advised to use the sympy module but I have had no luck with this either. What would be the easiest way to get this result?
EDIT
input_list = ["Enter your complex number (a+bi): ", \
"Degrees or radians?", \
"To how many decimal places do you want to round the argument?"]
output = multenterbox(text, title, input_list)
algebraline = output[0]
choice = output[1]
round2 = int(output[2])
#converting complex number to a suitable form for numpy
if "i" in algebraline:
j = algebraline.replace("i","j")
indeks = algebraline.index("i")
list = []
for element in algebraline:
list.append(element)
if "i" in algebraline and algebraline[indeks-1]=="+" or algebraline[indeks-1]=="-":
list.insert(indeks, 1)
x = "".join(str(e) for e in list)
j = x.replace("i","j")
arv = eval(j)
elif "i" not in algebraline:
arv = eval(algebraline)
#let's find the module
a = int(list[0])
b = int(list[2])
module = sqrt(a**2+b**2)
This method works well when the complex number is, for example, 1+i. However, when I try to insert sqrt(3)-1i, the list looks like this: ['s', 'q', 'r', 't', '(', '3', ')', '-', 1, 'i'] and my programme won't work. The same problem occurs when b is a root (for example 1-sqrt(3)i). What can be done to make it work for square roots as well? (I need numpy later on to calculate angles; that's why converting 'i' into 'j' is important.)
A:
Works by using
I (from sympy) rather than 1j
the built-in abs function, which calls sympy.Abs for complex arguments
Code
from sympy import I
complexnr = 1 + I # use I rather than 1j
print(abs(complexnr)) # also works with np.abs and np.absolute
Output
sqrt(2)
A:
If you want to use SymPy, you have to write the complex numbers as sympy expressions.
from sympy import *
cabs = lambda z: sqrt(re(z)**2 + im(z)**2)
complexnr = 1 + 1j
print(cabs(complexnr))
# out: 1.4142135623731
We are getting a float number because complexnr is of type complex and its real and imaginary parts are of type float. Thus, SymPy's re and im functions return float numbers. But when sqrt receives a float number, it evaluates the result.
We can work around this problem in two ways.
The first: if we are dealing with simple complex numbers where real and imaginary parts are integers, we can write the complex number as a string, sympify it (which means convert to a sympy expression):
complexnr = sympify("1 + 1j")
print(cabs(complexnr))
# out: sqrt(2)
A second way consists of using the complex number directly, then applying nsimplify in order to attempt to convert the resulting float number to some symbolic form:
complexnr = 1 + 1j
result = cabs(complexnr) # result is a Float number, 1.4142135623731
print(result.nsimplify())
# out: sqrt(2)
| Exact value of a root on Python | I'm writing a programme that converts complex numbers.
Right now I'm having problems with this piece of code:
import numpy
complexnr = 1+1j
mod= numpy.absolute(complexnr)
print(mod)
The output of this code is:
1.4142135623730951
I would like to get √2 as the output.
I have been advised to use the sympy module but I have had no luck with this either. What would be the easiest way to get this result?
EDIT
input_list = ["Enter your complex number (a+bi): ", \
"Degrees or radians?", \
"To how many decimal places do you want to round the argument?"]
output = multenterbox(text, title, input_list)
algebraline = output[0]
choice = output[1]
round2 = int(output[2])
#converting complex number to a suitable form for numpy
if "i" in algebraline:
j = algebraline.replace("i","j")
indeks = algebraline.index("i")
list = []
for element in algebraline:
list.append(element)
if "i" in algebraline and algebraline[indeks-1]=="+" or algebraline[indeks-1]=="-":
list.insert(indeks, 1)
x = "".join(str(e) for e in list)
j = x.replace("i","j")
arv = eval(j)
elif "i" not in algebraline:
arv = eval(algebraline)
#let's find the module
a = int(list[0])
b = int(list[2])
module = sqrt(a**2+b**2)
this method works well when the complex number is 1+i for example, however when i try to insert sqrt(3)-1i, the list looks like this ['s', 'q', 'r', 't', '(', '3', ')', '-', 1, 'i'] and my programme won't work. Same problem occurs when b is a root (for example 1-sqrt(3)i). What can be done to make it work for square roots as well? (I need numpy later on to calculate angles, that's why converting 'i' into 'j' is important)
| [
"Works by using\n\nI (from sympy) rather than 1j\nbuiltin abs function which calls sympby.Abs for complex arguments\n\nCode\nfrom sympy import I\n\ncomplexnr = 1 + I # use I rather than 1j\nprint(abs(complexnr)) # also works with np.abs and np.absolute\n\nOutput\n\n",
"If you want to use SymPy, you have to write the complex numbers as sympy expressions.\nfrom sympy import *\ncabs = lambda z: sqrt(re(z)**2 + im(z)**2)\ncomplexnr = 1 + 1j\nprint(cabs(complexnr))\n# out: 1.4142135623731\n\nWe are getting a float number because complexnr is of type complex and its real and imaginary parts are of type float. Thus, SymPy's re and im functions returns float numbers. But when sqrt receives a float number, it evaluates the result.\nWe can workaround this problem in two ways.\nThe first: if we are dealing with simple complex numbers where real and imaginary parts are integers, we can write the complex number as a string, sympify it (which means convert to a sympy expression):\ncomplexnr = sympify(\"1 + 1j\")\nprint(cabs(complexnr))\n# out: sqrt(2)\n\nA second way consist in using the complex number directly, then apply nsimplify in order to attempt to convert the resulting float number to some symbolic form:\ncomplexnr = 1 + 1j\nresult = cabs(complexnr) # result is a Float number, 1.4142135623731\nprint(result.nsimplify())\n# out: sqrt(2)\n\n"
] | [
1,
0
] | [] | [] | [
"numpy",
"python",
"sympy"
] | stackoverflow_0074674649_numpy_python_sympy.txt |
Q:
Python folium - Circle not working along with popup
I found some nice solutions here:
How to create on click popup which includes plots using ipyleaflet, Folium or Geemap?
which potentially would allow me to assign more things to the marker when it's clicked. In my situation I have a lot of circles assigned to the marker, but they all appear at once, which doesn't look good.
I need the folium.Circle populated at the moment when I click on the marker. It could appear along with the pop-up information.
My code looks as follows:
fm = folium.Marker(
location=[lat,lng],
popup=folium.Popup(max_width=450).add_child(
folium.Circle(
[lat,lng],
radius=10,
fill=True,
weight=0.2)),
icon = folium.Icon(color='darkpurple', icon='glyphicon-briefcase'))
map.add_child(fm)
Unfortunately, it doesn't work, as my map comes without some features:
Despite no error from Python's console side, I have an error in the map console
Uncaught TypeError: Cannot read properties of undefined (reading 'addLayer')
at i.addTo (leaflet.js:5:64072)
and I haven't the faintest idea how to solve it.
Is there any option for making my circle populated only when the marker is clicked?
A:
To create a marker on a folium map that displays a circle when clicked, you can use the following steps:
First, create a marker on the map using the folium.Marker class and specify the location and any popup information you want to display when the marker is clicked.
fm = folium.Marker(
location=[lat, lng],
popup=folium.Popup(max_width=450).add_child(
folium.Vega(data, width=450, height=250)),
icon=folium.Icon(color='darkpurple', icon='glyphicon-briefcase'))
Next, create a circle using the folium.Circle class and specify the location and radius of the circle.
circle = folium.Circle(
[lat, lng],
radius=10,
fill=True,
weight=0.2)
To make the circle appear only when the marker is clicked, you can add the circle to the marker's popup attribute using the add_child() method.
fm.popup.add_child(circle)
Finally, add the marker to the map using the add_child() method.
map.add_child(fm)
Here is an example of what the final code might look like:
fm = folium.Marker(
location=[lat, lng],
popup=folium.Popup(max_width=450),
icon=folium.Icon(color='darkpurple', icon='glyphicon-briefcase'))
circle = folium.Circle(
[lat, lng],
radius=10,
fill=True,
weight=0.2)
fm.popup.add_child(circle)
map.add_child(fm)
A:
Not necessarily the best approach - but a smooth alternative to @gentleslaughter's implementation:
You could use a click_action argument in folium.Marker with a JavaScript function that will add the circle to the map whenever the marker is clicked!
js_f= """
function onClick(e) {
var circle = L.circle([e.latlng.lat, e.latlng.lng], {radius: 10, fill: true, weight: 0.2}).addTo(map);
}
"""
Here the exact same folium.Marker with the click_action:
fm = folium.Marker(
location=[lat, lng],
popup=folium.Popup(max_width=450),
icon=folium.Icon(color='darkpurple', icon='glyphicon-briefcase'),
click_action=js_f,
)
map.add_child(fm)
| Python folium - Circle not working along with popup | I found some nice solutions here:
How to create on click popup which includes plots using ipyleaflet, Folium or Geemap?
which potentially would allow me to assign more things to the marker when it's clicked. In my situation I have a lot of circles assigned to the marker, but they appear all which doesn't look well.
I need the folium.Circle populated at the moment when I click on the marker. It could appear along with the pop-up information.
My code looks as follows:
fm = folium.Marker(
location=[lat,lng],
popup=folium.Popup(max_width=450).add_child(
folium.Circle(
[lat,lng],
radius=10,
fill=True,
weight=0.2)),
icon = folium.Icon(color='darkpurple', icon='glyphicon-briefcase'))
map.add_child(fm)
Unfortunately, it doesn't work, as my map comes without some features:
Despite no error from Python's console side, I have an error in the map console
Uncaught TypeError: Cannot read properties of undefined (reading 'addLayer')
at i.addTo (leaflet.js:5:64072)
and I have no faintest idea how to solve it
Is there any option of making my circle populated just when clicked on the marker?
| [
"To create a marker on a folium map that displays a circle when clicked, you can use the following steps:\n\nFirst, create a marker on the map using the folium.Marker class and specify the location and any popup information you want to display when the marker is clicked.\n\nfm = folium.Marker(\n location=[lat, lng],\n popup=folium.Popup(max_width=450).add_child(\n folium.Vega(data, width=450, height=250)),\n icon=folium.Icon(color='darkpurple', icon='glyphicon-briefcase'))\n\n\nNext, create a circle using the folium.Circle class and specify the location and radius of the circle.\n\ncircle = folium.Circle(\n [lat, lng],\n radius=10,\n fill=True,\n weight=0.2)\n\n\nTo make the circle appear only when the marker is clicked, you can add the circle to the marker's popup attribute using the add_to() method.\n\nfm.popup.add_child(circle)\n\n\nFinally, add the marker to the map using the add_child() method.\n\nmap.add_child(fm)\n\nHere is an example of what the final code might look like:\nfm = folium.Marker(\n location=[lat, lng],\n popup=folium.Popup(max_width=450),\n icon=folium.Icon(color='darkpurple', icon='glyphicon-briefcase'))\n\ncircle = folium.Circle(\n [lat, lng],\n radius=10,\n fill=True,\n weight=0.2)\n\nfm.popup.add_child(circle)\nmap.add_child(fm)\n\n",
"Not necessarily the best approach - but a smooth alternative to @gentleslaughter's implementation:\nYou could use a click_action argument in folium.Marker with a JavaScript function that will add the circle to the map whenever the marker is clicked!\njs_f= \"\"\"\n function onClick(e) {\n var circle = L.circle([e.latlng.lat, e.latlng.lng], {radius: 10, fill: true, weight: 0.2}).addTo(map);\n }\n\"\"\"\n\nHere the exact same folium.Marker with the click_action:\nfm = folium.Marker(\n location=[lat, lng],\n popup=folium.Popup(max_width=450),\n icon=folium.Icon(color='darkpurple', icon='glyphicon-briefcase'),\n click_action=js_f,\n)\nmap.add_child(fm)\n\n"
] | [
0,
0
] | [] | [] | [
"folium",
"leaflet",
"python"
] | stackoverflow_0074520790_folium_leaflet_python.txt |
Q:
Left join in (flask)sqlalchemy with getting unmatched values and filter on the right table
I want to get a list of all assignments, with the progress of the user (the UserAssignments table) also in the result set. That means there should be a join between the assignments and userassignments table (where the assignmentid is equal), but also a filter to check if the progress is from the current user. The diagram of the database and the actual models are listed below.
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(20), index=True, unique=True, nullable=False)
password_hash = db.Column(db.String(128), nullable=False)
roleid = db.Column(db.Integer, db.ForeignKey('role.roleid'), nullable=False)
groups = db.relationship('Group', secondary=users_groups, lazy='dynamic')
assignments = db.relationship('Assignment', secondary=users_assignments, lazy='dynamic')
class Assignment(db.Model):
assignmentid = db.Column(db.Integer, primary_key=True)
assignmentname = db.Column(db.String(128))
assignmentranking = db.Column(db.Integer)
assignmentquestion = db.Column(db.String, nullable=False)
def __repr__(self):
return '<Assignment {}>'.format(self.assignmentid)
class UserAssignments(db.Model):
__tablename__ = 'user_assignments'
userid = db.Column(db.Integer, db.ForeignKey('user.id'), primary_key=True)
assignmentid = db.Column(db.Integer, db.ForeignKey('assignment.assignmentid'), primary_key=True)
status = db.Column(db.Integer)
progress = db.Column(db.String)
def __repr__(self):
return '<UserAssignments {}>'.format(self.userid, self.assignmentid)
diagram
I tried the following query, but that resulted in only the assignments with a matched userassignment (progress). (The userid is passed into the function.)
results = db.session.query(Assignment, UserAssignments).join(UserAssignments, (UserAssignments.assignmentid == Assignment.assignmentid)&(UserAssignments.userid==userid), isouter=True).filter(UserAssignments.userid==userid).all()
I also tried the query without the filter, but that resulted in all userassignments (also from other users).
results = db.session.query(Assignment, UserAssignments).join(UserAssignments, (UserAssignments.assignmentid == Assignment.assignmentid)&(UserAssignments.userid==userid), isouter=True).all()
As said earlier, I want to achieve a result with all assignments listed, with the userassignment included when there is one for the current user.
A:
Try the following query; keeping the userid check together with an IS NULL test in the filter preserves the unmatched (left-joined) assignments:
from sqlalchemy import or_  # needed for the combined filter

results = db.session.query(
Assignment,
UserAssignments,
).join(
UserAssignments,
UserAssignments.assignmentid == Assignment.assignmentid,
isouter=True,
).filter(
or_(
UserAssignments.userid == userid,
UserAssignments.userid.is_(None),
)
).all()
| Left join in (flask)sqlalchemy with getting unmatched values and filter on the right table | I want to get a list of all assignments, with the progress of the user (the UserAssignments table) also in the result set. That means there should be a join between the assignments and userassignments table (where the assignmentid is equal), but also a filter to check if the progress is from the current user. The diagram of the database and the actual models are listed below.
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(20), index=True, unique=True, nullable=False)
password_hash = db.Column(db.String(128), nullable=False)
roleid = db.Column(db.Integer, db.ForeignKey('role.roleid'), nullable=False)
groups = db.relationship('Group', secondary=users_groups, lazy='dynamic')
assignments = db.relationship('Assignment', secondary=users_assignments, lazy='dynamic')
class Assignment(db.Model):
assignmentid = db.Column(db.Integer, primary_key=True)
assignmentname = db.Column(db.String(128))
assignmentranking = db.Column(db.Integer)
assignmentquestion = db.Column(db.String, nullable=False)
def __repr__(self):
return '<Assignment {}>'.format(self.assignmentid)
class UserAssignments(db.Model):
__tablename__ = 'user_assignments'
userid = db.Column(db.Integer, db.ForeignKey('user.id'), primary_key=True)
assignmentid = db.Column(db.Integer, db.ForeignKey('assignment.assignmentid'), primary_key=True)
status = db.Column(db.Integer)
progress = db.Column(db.String)
def __repr__(self):
return '<UserAssignments {}>'.format(self.userid, self.assignmentid)
diagram
I tried the following query, but that resulted only the assignments with a matched userassignment (progress). (the userid is given into the function)
results = db.session.query(Assignment, UserAssignments).join(UserAssignments, (UserAssignments.assignmentid == Assignment.assignmentid)&(UserAssignments.userid==userid), isouter=True).filter(UserAssignments.userid==userid).all()
I also tried the query without the filter, but that resulted in all userassignments (also from other users).
results = db.session.query(Assignment, UserAssignments).join(UserAssignments, (UserAssignments.assignmentid == Assignment.assignmentid)&(UserAssignments.userid==userid), isouter=True).all()
As said earlier, I want to achieve a result with all assignments listed, with the userassignment included when there is one for the current user.
| [
"try next query\nresults = db.session.query(\n Assignment, \n UserAssignments,\n).join(\n UserAssignments, \n UserAssignments.assignmentid == Assignment.assignmentid, \n isouter=True,\n).filter(\n or_(\n UserAssignments.userid == userid,\n UserAssignments.userid.is_(None),\n )\n).all()\n\n"
] | [
0
] | [] | [] | [
"flask_sqlalchemy",
"python",
"sqlalchemy"
] | stackoverflow_0074675033_flask_sqlalchemy_python_sqlalchemy.txt |
Q:
Counting number of 2D points with both coordinates higher than given point in O(logn)
Given a set of 2D points (x1, y1)...(xn, yn) and one point from that same set (xi, yi), I need to return the number of points such that their x coordinate is bigger than xi and y coordinate is bigger than yi.
Approach is limited to usage of basic data structures such as Array, List (incl. Linked), Stack, Queue, Trees (Binary, BST, AVL, B-tree), Hash Table and Heap.
Required time complexity is O(logn) and you can assume the data is given to you in a data structure of your choice (from the ones provided above), however you wish it sorted and is not counted towards the time complexity.
Because the requirement is O(logn), I thought about having the x-coordinates sorted in an Array and using Binary Search. Though I'm not sure how to have the y-coordinates stored in a way that will preserve the time complexity.
I could most certainly use any hint at resolving this.
A:
One way to solve this problem in O(logn) time is to use a binary search tree. We can insert the points into the tree, sorted by their x coordinate similar to your suggestion (except using a BST struct) and then do a binary search for the point (xi, yi).
For each node in the tree, we can check if the y coordinate is greater than yi. If it is, we can add it to our count and then search the right subtree. We can repeat this process until we have reached the leaf nodes. This approach should take O(logn) time to complete.
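A minimal Python sketch of the walk described above (the node structure and names are illustrative assumptions, not code from the question): only the nodes on a single root-to-leaf search path toward xi are examined, which is what keeps the work around O(logn) for a balanced tree.
class Node:
    def __init__(self, x, y, left=None, right=None):
        self.x, self.y = x, y
        self.left, self.right = left, right

def count_on_search_path(root, xi, yi):
    # Walk the BST (keyed on x) toward xi, counting visited points
    # whose x and y coordinates both exceed xi and yi.
    count = 0
    node = root
    while node is not None:
        if node.x > xi and node.y > yi:
            count += 1
        node = node.right if node.x <= xi else node.left
    return count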
| Counting number of 2D points with both coordinates higher than given point in O(logn) | Given a set of 2D points (x1, y1)...(xn, yn) and one point from that same set (xi, yi), I need to return the number of points such that their x coordinate is bigger than xi and y coordinate is bigger than yi.
Approach is limited to usage of basic data structures such as Array, List (incl. Linked), Stack, Queue, Trees (Binary, BST, AVL, B-tree), Hash Table and Heap.
Required time complexity is O(logn) and you can assume the data is given to you in a data structure of your choice (from the ones provided above), however you wish it sorted and is not counted towards the time complexity.
Because the requirement is O(logn), I thought about having the x-coordinates sorted in an Array and using Binary Search. Though I'm not sure how to have the y-coordinates stored in a way that will preserve the time complexity.
I could most certainly use any hint at resolving this.
| [
"One way to solve this problem in O(logn) time is to use a binary search tree. We can insert the points into the tree, sorted by their x coordinate similar to your suggestion (except using a BST struct) and then do a binary search for the point (xi, yi).\nFor each node in the tree, we can check if the y coordinate is greater than yi. If it is, we can add it to our count and then search the right subtree. We can repeat this process until we have reached the leaf nodes. This approach should take O(logn) time to complete.\n"
] | [
0
] | [] | [] | [
"algorithm",
"data_structures"
] | stackoverflow_0074664742_algorithm_data_structures.txt |
Q:
C code stops executing in the middle and comes to a standstill. Cannot figure out where I went wrong
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<string.h>
#include<conio.h>
int choice,i,j,semester_fee=2400, number=0; //number is number of buses in system
char user;//which user
FILE *bptr;
struct details{
int bus_number;
char route[100];
char busdriver[100];
long int drivercontact;
char seat[32][14];
};
void login(struct details array[], int number);
void bus(struct details array[], int number);//installing new bus
void record(void);//displaying existing buses
void newpassword(void);
void reservation(void);
void cancel(void);
void exit(void);
int main(){
struct details array[number];
printf("Press 'a' for accessing admin options and 's' for accessing student/passenger options\n");
scanf(" %c", &user);
system("cls");
switch(user){
case 'a':
case 'A':
login(array, number);
system("cls");
do{
printf("Please press the corresponding number to select one of the following options:\n");
printf("1. Install new bus\n");
printf("2. View Details\n");
printf("3. Update password\n");
printf("4. Exit\n");
scanf("%d", &choice);
system("cls");
switch(choice){
case 1: bus(array, number);
break;
}}while(choice!=4);
break;
}
return 0;
}
void login(struct details array[], int number){
system("cls");
char name[20]={0}, pass[20]={0}, sign_name[20]={0}, sign_pass[20]={0}, ch;
int result_name, result_pass, option, i=0;
FILE*fptr;
printf("\t\tSelect a number:\n");
printf("\t\t 1. Register\n");
printf("\t\t 2. Sign in\n");
printf("Your choice: ");
scanf("%d", &option);
if(option==1){
fptr=fopen("Signin_details.txt", "w+");
printf("Username: ");
fflush(stdin);
gets(name);
printf("Password: ");
fflush(stdin);
gets(pass);
printf("You have successfully registered!");
fprintf(fptr, "%s\n%s", name, pass);
fclose(fptr);
printf("\nPress any key to continue ... ");
getch();
bus(array, number);
}
if(option==2){
fptr=fopen("Signin_details.txt", "r");
rewind(fptr); //sets pointer back to start of file
while((ch=fgetc(fptr))==NULL) printf("\nPlease register yourself first\n");
printf("Username: ");
fflush(stdin);//refresh
gets(sign_name);
printf("Password: ");
fflush(stdin);
gets(sign_pass);
char line[20];
fgets(line, sizeof(line), fptr);
result_name=strcmp(sign_name, line);
fgets(line, sizeof(line), fptr);
result_pass=strcmp(sign_pass, line);
if(result_name==0 && result_pass==0) printf("\nWelcome user %s!", name);
while(result_pass!=0 || result_name!=0){
printf("\nWrong login details. Please enter them again.\n\n");
printf("Username: ");
fflush(stdin);//refresh
gets(sign_name);
printf("Password: ");
fflush(stdin);
gets(sign_pass);
char line[20];
rewind(fptr);
fgets(line, sizeof(line), fptr);
result_name=strcmp(sign_name, line);
fgets(line, sizeof(line), fptr);
result_pass=strcmp(sign_pass, line);
}
getch();}
fclose(fptr);
}
void bus(struct details array[], int number){
int no;
printf("Enter the number of buses you want to add: ");
scanf("%d", &no);
number+=no;
int busno;
FILE* fptr[no];
char buffer[6];
char string1[5],ch;
char string2[5];
for(int i=0; i<no; i++){
printf("Enter the bus number: ");
scanf("%d", &busno);
sprintf(string1, "%d", busno);//converts the integer busno to text and stores it in string1
bptr=fopen("buses.txt", "a+");
for(int j=0; j<i; j++){
if(busno==array[j].bus_number){
while(busno==array[j].bus_number){
printf("Bus data already exists. Enter another bus number: ");
scanf("%d", &busno);
}break;}
}
fseek(bptr, 0, SEEK_SET); //sets pointer to beginning of file buses
while(fgets(string2, 5, bptr)){
while(strcmp(string1, string2)==0){
printf("Bus data already exists. Enter another bus number: ");
scanf("%d", &busno);
sprintf(string1, "%d", busno);
}}
fprintf(bptr, "%d\n", busno);
fclose(bptr);
array[i].bus_number=busno;
sprintf(buffer, "%d.txt", busno);
fptr[i]=fopen(buffer, "w+");
printf("Enter the bus driver's name: ");
fflush(stdin);
gets(array[i].busdriver);
printf("Enter bus driver contact: ");
scanf("%ld", &array[i].drivercontact);
printf("Enter route of the bus: ");
fflush(stdin);
gets(array[i].route);
fprintf(fptr[i],"Bus number: %d\tSemester fee: %d\t\tBus Driver contact number: %ld\t\tBus Driver: %s\t\tRoute: %s\n", busno, semester_fee, array[i].drivercontact, array[i].busdriver, array[i].route);
for(int n=0; n<32;n++){
char s2[6]="Empty";
strcpy(array[i].seat[n], s2);}
int l=0;
for(int j=0; j<4; j++){
for(int k=0; k<8; k++){
fprintf(fptr, "\t%d. %s\t", ++l, array[i].seat[l]);
}
fprintf(fptr, "\n");}
fclose(fptr[i]);}
printf("Press any key to continue ... ");
getch();
system("cls");
}
I am working on a project which would reserve bus seats. This is a small piece of code. Since I am just getting started, there might be a lot of details missing. But for this piece of code I am trying to take login information from the user and to input each bus's data. Every time I enter bus data, it gets saved in a new file, and the bus number is also saved in another file, "buses.txt". However, when I execute it, my login does not work, and after the input of the first bus number, the program stops executing and comes to a standstill. In the login part, it registers and saves the sign-in details in another file, but when I want to sign in, it won't let me and says "wrong sign-in details". What am I doing wrong in the two parts?
A:
You define a global int:
int number=0; //number is number of buses in system
You then define an array ... of size 0
struct details array[number];
By the way some compilers will provide a warning for this.
Then in login you access the array, which is probably where you crash.
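As a minimal sketch of one possible fix (this assumes a fixed capacity is acceptable; MAX_BUSES is an invented name, not part of the original code), size the array by a compile-time capacity and keep number only as the count of buses actually stored:
#define MAX_BUSES 100                    /* assumed upper bound on buses */

int number = 0;                          /* buses currently stored */

/* inside main(): size the array by the capacity, not by the current count */
struct details array[MAX_BUSES];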
| C code stops executing in the middle and comes to a standstill. Cannot figure out where I went wrong | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<string.h>
#include<conio.h>
int choice,i,j,semester_fee=2400, number=0; //number is number of buses in system
char user;//which user
FILE *bptr;
struct details{
int bus_number;
char route[100];
char busdriver[100];
long int drivercontact;
char seat[32][14];
};
void login(struct details array[], int number);
void bus(struct details array[], int number);//installing new bus
void record(void);//displaying existing buses
void newpassword(void);
void reservation(void);
void cancel(void);
void exit(void);
int main(){
struct details array[number];
printf("Press 'a' for accessing admin options and 's' for accessing student/passenger options\n");
scanf(" %c", &user);
system("cls");
switch(user){
case 'a':
case 'A':
login(array, number);
system("cls");
do{
printf("Please press the corresponding number to select one of the following options:\n");
printf("1. Install new bus\n");
printf("2. View Details\n");
printf("3. Update password\n");
printf("4. Exit\n");
scanf("%d", &choice);
system("cls");
switch(choice){
case 1: bus(array, number);
break;
}}while(choice!=4);
break;
}
return 0;
}
void login(struct details array[], int number){
system("cls");
char name[20]={0}, pass[20]={0}, sign_name[20]={0}, sign_pass[20]={0}, ch;
int result_name, result_pass, option, i=0;
FILE*fptr;
printf("\t\tSelect a number:\n");
printf("\t\t 1. Register\n");
printf("\t\t 2. Sign in\n");
printf("Your choice: ");
scanf("%d", &option);
if(option==1){
fptr=fopen("Signin_details.txt", "w+");
printf("Username: ");
fflush(stdin);
gets(name);
printf("Password: ");
fflush(stdin);
gets(pass);
printf("You have successfully registered!");
fprintf(fptr, "%s\n%s", name, pass);
fclose(fptr);
printf("\nPress any key to continue ... ");
getch();
bus(array, number);
}
if(option==2){
fptr=fopen("Signin_details.txt", "r");
rewind(fptr); //sets pointer back to start of file
while((ch=fgetc(fptr))==NULL) printf("\nPlease register yourself first\n");
printf("Username: ");
fflush(stdin);//refresh
gets(sign_name);
printf("Password: ");
fflush(stdin);
gets(sign_pass);
char line[20];
fgets(line, sizeof(line), fptr);
result_name=strcmp(sign_name, line);
fgets(line, sizeof(line), fptr);
result_pass=strcmp(sign_pass, line);
if(result_name==0 && result_pass==0) printf("\nWelcome user %s!", name);
while(result_pass!=0 || result_name!=0){
printf("\nWrong login details. Please enter them again.\n\n");
printf("Username: ");
fflush(stdin);//refresh
gets(sign_name);
printf("Password: ");
fflush(stdin);
gets(sign_pass);
char line[20];
rewind(fptr);
fgets(line, sizeof(line), fptr);
result_name=strcmp(sign_name, line);
fgets(line, sizeof(line), fptr);
result_pass=strcmp(sign_pass, line);
}
getch();}
fclose(fptr);
}
void bus(struct details array[], int number){
int no;
printf("Enter the number of buses you want to add: ");
scanf("%d", &no);
number+=no;
int busno;
FILE* fptr[no];
char buffer[6];
char string1[5],ch;
char string2[5];
for(int i=0; i<no; i++){
printf("Enter the bus number: ");
scanf("%d", &busno);
sprintf(string1, "%d", busno);//converts the integer busno to text and stores it in string1
bptr=fopen("buses.txt", "a+");
for(int j=0; j<i; j++){
if(busno==array[j].bus_number){
while(busno==array[j].bus_number){
printf("Bus data already exists. Enter another bus number: ");
scanf("%d", &busno);
}break;}
}
fseek(bptr, 0, SEEK_SET); //sets pointer to beginning of file buses
while(fgets(string2, 5, bptr)){
while(strcmp(string1, string2)==0){
printf("Bus data already exists. Enter another bus number: ");
scanf("%d", &busno);
sprintf(string1, "%d", busno);
}}
fprintf(bptr, "%d\n", busno);
fclose(bptr);
array[i].bus_number=busno;
sprintf(buffer, "%d.txt", busno);
fptr[i]=fopen(buffer, "w+");
printf("Enter the bus driver's name: ");
fflush(stdin);
gets(array[i].busdriver);
printf("Enter bus driver contact: ");
scanf("%ld", &array[i].drivercontact);
printf("Enter route of the bus: ");
fflush(stdin);
gets(array[i].route);
fprintf(fptr[i],"Bus number: %d\tSemester fee: %d\t\tBus Driver contact number: %ld\t\tBus Driver: %s\t\tRoute: %s\n", busno, semester_fee, array[i].drivercontact, array[i].busdriver, array[i].route);
for(int n=0; n<32;n++){
char s2[6]="Empty";
strcpy(array[i].seat[n], s2);}
int l=0;
for(int j=0; j<4; j++){
for(int k=0; k<8; k++){
fprintf(fptr, "\t%d. %s\t", ++l, array[i].seat[l]);
}
fprintf(fptr, "\n");}
fclose(fptr[i]);}
printf("Press any key to continue ... ");
getch();
system("cls");
}
I am working on a project which would reserve bus seats. This is a small piece of code. Since I am just getting started, there might be a lot of details missing. But for this piece of code I am trying to take login information from the user and inputting each bus data. Everytime I enter a bus data, it gets saved in a new file and the bus number is also saved in another file "buses.txt". However when I execute it, my login does not work, and after the input of the first bus number, the program stops executing and comes to a standstill. In the login part, it registers and saves the sign-in details in another file, but when I want to sign-in, it won't let me and says "wrong sign-in details". What am I doing wrong in the two parts?
| [
"You define a global int:\nint number=0; //number is number of buses in system\n\nYou then define an array ... of size 0\nstruct details array[number];\n\nBy the way some compilers will provide a warning for this.\nThen in login you access the array, which is probably where you crash.\n"
] | [
0
] | [] | [] | [
"c",
"file_handling"
] | stackoverflow_0074675053_c_file_handling.txt |
Q:
How to run a python flask application on https from EC2 machine on HTTPS
I have written a small Python application using Flask. I have deployed the application on an EC2 machine. Currently the application runs over HTTP. I want the application on HTTPS. How can I do it without making any modification in the code, such as adding ssl_context?
A:
You shouldn't need to be touching ssl_context. What you are trying to do is to just run your web server over HTTPS.
Depending on the web server you have (Nginx, Apache etc.), this is just a case of getting/generating an SSL certificate (you can use CertBot for this - this generates certificates via LetsEncrypt).
Once you have an SSL certificate, you then need to configure your web server to use this SSL certificate (as well as allow HTTPS connections in your firewall settings).
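As a rough illustration only (the domain, certificate paths and application port are placeholders, and this assumes Nginx proxying to the Flask app with a certificate already issued by Certbot), the HTTPS server block might look something like this:
server {
    listen 443 ssl;
    server_name example.com;                                       # placeholder domain

    ssl_certificate     /etc/letsencrypt/live/example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;

    location / {
        proxy_pass http://127.0.0.1:5000;                          # assumed Flask/Gunicorn port
    }
}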
These guides helped me in the past, but you can always look online for a guide that suits the server OS you are using:
https://www.digitalocean.com/community/tutorials/how-to-secure-apache-with-let-s-encrypt-on-ubuntu-20-04
https://www.nginx.com/blog/using-free-ssltls-certificates-from-lets-encrypt-with-nginx/
Hope this helps!
| How to run a python flask application on https from EC2 machine on HTTPS | I have written a small python application using flask. I have deployed the application on EC2 machine. Currently the application run as http. I want the application on https. How can I do it? instead of doing any modification in code like adding ssl_context?
| [
"You shouldn't need to be touching ssl_context. What you are trying to do is to just run your web server over HTTPS.\nDepending on the web server you have (Nginx, Apache etc.), this is just a case of getting/generating a SSL certificate (you can use CertBot for this - this generates certificates via LetsEncrypt).\nOnce you have a SSL certificate, you then need to configure your web server to use this SSL certificate (as well as allow HTTPS connections on your firewall settings).\nThese guides helped me in the past, but you can always look online for a guide that suits the server OS you are using:\n\nhttps://www.digitalocean.com/community/tutorials/how-to-secure-apache-with-let-s-encrypt-on-ubuntu-20-04\nhttps://www.nginx.com/blog/using-free-ssltls-certificates-from-lets-encrypt-with-nginx/\n\nHope this helps!\n"
] | [
1
] | [] | [] | [
"amazon_ec2",
"flask",
"python_3.x"
] | stackoverflow_0074674622_amazon_ec2_flask_python_3.x.txt |
Q:
HTML Section, No space to the right and bottom
I am currently making my own admin panel in HTML, CSS and JS, and I basically have one main section whose content will swap based on the selection in a sidebar.
I now want to center this section and make it have 10px space to every side using this code:
HTML:
<section id="home">
<div class="h1">Welcome to Cashlyte</div>
<div class="h2">The future of link advertising</div>
</section>
CSS:
margin-top: 10px;
margin-left: 10px;
margin-bottom: 10px;
margin-right: 10px;
But for some reason it does not have any space to the bottom and right side.
Here is the full code on codepen: https://codepen.io/lewopumo-biz-art-biz/pen/RwJqebB
A:
This is because your element uses absolute positioning that ignores margins. What you can do is add a spacer to your div like so:
#home:after {
content: '';
position: absolute;
bottom: -40px;
height: 40px;
width: 1px;
}
| HTML Section, No space to the right and bottom | I am currently making my own admin panel in html, css and js and i basically have one main section which content will swap based on the selection in an sidebar.
I now want to center this section and make it have 10px space to every side using this code:
HTML:
<section id="home">
<div class="h1">Welcome to Cashlyte</div>
<div class="h2">The future of link advertising</div>
</section>
CSS:
margin-top: 10px;
margin-left: 10px;
margin-bottom: 10px;
margin-right: 10px;
But for some reason it does not have any space to the bottom and right side.
Here is the full code on codepen: https://codepen.io/lewopumo-biz-art-biz/pen/RwJqebB
| [
"This is because your element uses absolute positioning that ignores margins. What you can do is add a spacer to your div like so:\n#home:after {\n content: '';\n position: absolute;\n bottom: -40px;\n height: 40px;\n width: 1px;\n}\n\n"
] | [
0
] | [] | [] | [
"css",
"html",
"javascript"
] | stackoverflow_0074675093_css_html_javascript.txt |
Q:
Difference between number of comparisons and the growth of the number of the comparisons of an algorithm
What is the difference between the number of comparisons and the growth of the number of comparisons of an algorithm? For example, for a binary search and a ternary search.
I understand that the number of comparisons is a fixed number for a specific case, but the growth takes into consideration the worst-case scenario (when the element is at the 2/3 position of the list in a ternary search). But I don't know whether I'm right or whether I missed something important.
A:
The number of comparisons is the total number of comparisons made by an algorithm during its execution. In contrast, the growth of the number of comparisons is the rate at which the number of comparisons increases with respect to the size of the input.
For example, a binary search algorithm has a number of comparisons of O(log n) where n is the input size. This means that the number of comparisons increases at a rate of log n with respect to the input size. In contrast, a ternary search algorithm has a number of comparisons of O(log3 n). This means that the number of comparisons increases at a rate of log3 n with respect to the input size.
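As a small illustration (a sketch that assumes, for simplicity, one probe per halving or thirding step), you can print how the two counts grow with the input size n:
import math

for n in (10, 1_000, 1_000_000):
    binary_probes = math.ceil(math.log2(n))      # grows like log2(n)
    ternary_probes = math.ceil(math.log(n, 3))   # grows like log3(n)
    print(n, binary_probes, ternary_probes)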
| Difference between number of comparisons and the growth of the number of the comparisons of an algorithm | What is the difference between number of comparisons and the growth of the number of the comparisons of an algorithm ? For example for a binary search and a ternary search.
I understand that the number of comparisons is a fixed number for a specific case, but the growth takes into consideration the worst case scenario (when the element is in the 2/3 of the list in the ternary search). But I don't know if I'm right or not or if I missed something important
| [
"The number of comparisons is the total number of comparisons made by an algorithm during its execution. In contrast, the growth of the number of comparisons is the rate at which the number of comparisons increases with respect to the size of the input.\nFor example, a binary search algorithm has a number of comparisons of O(log n) where n is the input size. This means that the number of comparisons increases at a rate of log n with respect to the input size. In contrast, a ternary search algorithm has a number of comparisons of O(log3 n). This means that the number of comparisons increases at a rate of log3 n with respect to the input size.\n"
] | [
0
] | [] | [] | [
"algorithm",
"big_o",
"binary_search",
"search",
"ternary_search"
] | stackoverflow_0074662300_algorithm_big_o_binary_search_search_ternary_search.txt |
Q:
shutil.move(os.path.join(...) does not work
This program creates new directories and automatically moves files there if keywords match. Only step 4 of my program does not work - files are not moved from the source folder to the destination folder. Everything else works.
Works as follows:
we have a [source folder], we have a [destination folder], we have a [keyword folder]
Look for .txt file in [keyword folder]
Create child destination directory named after .txt file in [destination folder]
Get keywords from .txt file
If keyword found in sourcefile -> move sourcefile from [source folder] to child in [destination folder]
My program works, except for step 4. There is no error message; the files just stay where they are.
The program ran smoothly in my prototype when it was hardcoded and used absolute paths.
In the code you find it under #### NOT WORKING:
You can try the program yourself: https://github.com/kormuch/python_file_organizer
(also in includes dummy .txt files for simulation purposes)
import os
import shutil
#directories with relative paths
dir_source_files = r'source files'
dir_destination = r'destination'
dir_keyword_files = r'keywords'
def create_folders_and_move_files():
scanned_dir_keyword_files = os.scandir(dir_keyword_files)
for file in scanned_dir_keyword_files: #creates new dirs named after .txt files in destination folder
print((f".txt file found in dir_keyword_files:\n{file} name of new folder: " + os.path.splitext(file.name)[0]))
new_dir_name = str(os.path.splitext(file.name)[0])
path_for_new_dir = os.path.join(dir_destination, new_dir_name)
try:
os.makedirs(path_for_new_dir, exist_ok = False)
print(f"directory creation succesful. created directory:{new_dir_name}\n")
except OSError as error:
print(f"directory creation not succesful. '{new_dir_name}' already exists\n")
# now a new directory is created and has the same name as the .txt file
scanned_dir_keyword_files = os.scandir(dir_keyword_files)
for txt_file_with_keywords in scanned_dir_keyword_files:
print(f"TASK 1: iterating through parent txt-keywordfile:\n {txt_file_with_keywords} ")
keywordlist_a = []
with open(txt_file_with_keywords) as txt_full_with_hashtags:
for keyword_with_hashtags in txt_full_with_hashtags.readlines():
keywords_without_hashtags = keyword_with_hashtags.rstrip().split('#') #automatically creates list and removes the hashtag from every keyword
keywords_without_hashtags.remove('') #removes empty entries from list
print(f"TASK 2: fill child keywordlist_a with keywords from parent:")
for keyword_without_hashtag in keywords_without_hashtags:
keywordlist_a.append(keyword_without_hashtag)
print('keyword added to keywordlist_a: ' + keyword_without_hashtag)
print("keywordlist_a ready:")
print(keywordlist_a)
#### NOT WORKING:
for scanned_dir_destination in os.scandir(dir_destination):
print("This is the destination path " + str(os.path.realpath(scanned_dir_destination)))
for keyword_a in keywordlist_a:
for scanned_scource_file in os.scandir(dir_source_files):
print(f"TASK 3: comparing keyword '{keyword_a}' to '{scanned_scource_file}'")
if keyword_a.lower() in str(scanned_scource_file).lower():
print(f"Bingo! '{keyword_a}' found in '{scanned_scource_file}'")
try:
shutil.move(os.path.join(scanned_scource_file.path, scanned_scource_file), scanned_dir_destination)
except:
pass
print("\n")
create_folders_and_move_files()
Output:
.txt file found in dir_keyword_files:
<DirEntry 'audiobooks.txt'> name of new folder: audiobooks
directory creation succesful. created directory:audiobooks
.txt file found in dir_keyword_files:
<DirEntry 'movies.txt'> name of new folder: movies
directory creation succesful. created directory:movies
TASK 1: iterating through parent txt-keywordfile:
<DirEntry 'audiobooks.txt'>
TASK 2: fill child keywordlist_a with keywords from parent:
keyword added to keywordlist_a: audiobook
keyword added to keywordlist_a: Hörbuch
keyword added to keywordlist_a: podcast
keywordlist_a ready:
['audiobook ', 'Hörbuch ', 'podcast']
This is the destination path C:\Users\Desktop\Desktop\Python\testing\file organizer\destination\audiobooks
TASK 3: comparing keyword 'audiobook ' to '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
Bingo! 'audiobook ' found in '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'audiobook ' to '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
Bingo! 'audiobook ' found in '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'Hörbuch ' to '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'Hörbuch ' to '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'podcast' to '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'podcast' to '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
This is the destination path C:\Users\Desktop\Desktop\Python\testing\file organizer\destination\movies
TASK 3: comparing keyword 'audiobook ' to '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
Bingo! 'audiobook ' found in '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'audiobook ' to '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
Bingo! 'audiobook ' found in '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'Hörbuch ' to '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'Hörbuch ' to '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'podcast' to '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'podcast' to '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
TASK 1: iterating through parent txt-keywordfile:
<DirEntry 'movies.txt'>
TASK 2: fill child keywordlist_a with keywords from parent:
keyword added to keywordlist_a: movie
keyword added to keywordlist_a: full film
keyword added to keywordlist_a: star wars
keywordlist_a ready:
['movie ', 'full film ', 'star wars']
This is the destination path C:\Users\Desktop\Desktop\Python\testing\file organizer\destination\audiobooks
TASK 3: comparing keyword 'movie ' to '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'movie ' to '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'full film ' to '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'full film ' to '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'star wars' to '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'star wars' to '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
This is the destination path C:\Users\Desktop\Desktop\Python\testing\file organizer\destination\movies
TASK 3: comparing keyword 'movie ' to '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'movie ' to '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'full film ' to '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'full film ' to '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'star wars' to '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'star wars' to '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
A:
Change the appropriate line to:
shutil.move(scanned_scource_file, scanned_dir_destination)
The reason your code didn't work was a failing shutil.move() call: it was passed an invalid path, raising the error:
# NotADirectoryError: [Errno 20] Not a directory: 'source files/How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt/source files/How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'
The core of the problem you were facing was the wrong expectation that scanned_scource_file.path would provide only the directory part without the base file name; it actually provides the entire full path, including the file name.
Notice that a change of the line to:
shutil.move(scanned_scource_file.path, scanned_dir_destination)
would do the same as
shutil.move(scanned_scource_file, scanned_dir_destination)
where the first version provides the full path file name as a string and the other one as an equivalent object.
By the way: use scanned_scource_file.name instead of str(scanned_scource_file).
| shutil.move(os.path.join(...) does not work | This program creates new directories and automatically moves files there, if keywords match. Only step 4 of my program does not work - files are not moved from source foulder to destination folder. Everything else works.*
Works as follows:
we have a [source folder], we have a [destination folder], we have a [keyword folder]
Look for .txt file in [keyword folder]
Create child destination directory named after .txt file in [destination folder]
Get keywords from .txt file
If keyword found in sourcefile -> move sourcefile from [source folder] to child in [destination folder]
My program works, except for step 4. No Error message. Files just stay where they are.
Program ran smoothly in my prototype when hardcoded and used absolute paths.
In the code you find it under #### NOT WORKING:
You can try the program yourself: https://github.com/kormuch/python_file_organizer
(it also includes dummy .txt files for simulation purposes)
import os
import shutil
#directories with relative paths
dir_source_files = r'source files'
dir_destination = r'destination'
dir_keyword_files = r'keywords'
def create_folders_and_move_files():
scanned_dir_keyword_files = os.scandir(dir_keyword_files)
for file in scanned_dir_keyword_files: #creates new dirs named after .txt files in destination folder
print((f".txt file found in dir_keyword_files:\n{file} name of new folder: " + os.path.splitext(file.name)[0]))
new_dir_name = str(os.path.splitext(file.name)[0])
path_for_new_dir = os.path.join(dir_destination, new_dir_name)
try:
os.makedirs(path_for_new_dir, exist_ok = False)
print(f"directory creation succesful. created directory:{new_dir_name}\n")
except OSError as error:
print(f"directory creation not succesful. '{new_dir_name}' already exists\n")
# now a new directory is created and has the same name as the .txt file
scanned_dir_keyword_files = os.scandir(dir_keyword_files)
for txt_file_with_keywords in scanned_dir_keyword_files:
print(f"TASK 1: iterating through parent txt-keywordfile:\n {txt_file_with_keywords} ")
keywordlist_a = []
with open(txt_file_with_keywords) as txt_full_with_hashtags:
for keyword_with_hashtags in txt_full_with_hashtags.readlines():
keywords_without_hashtags = keyword_with_hashtags.rstrip().split('#') #automatically creates list and removes the hashtag from every keyword
keywords_without_hashtags.remove('') #removes empty entries from list
print(f"TASK 2: fill child keywordlist_a with keywords from parent:")
for keyword_without_hashtag in keywords_without_hashtags:
keywordlist_a.append(keyword_without_hashtag)
print('keyword added to keywordlist_a: ' + keyword_without_hashtag)
print("keywordlist_a ready:")
print(keywordlist_a)
#### NOT WORKING:
for scanned_dir_destination in os.scandir(dir_destination):
print("This is the destination path " + str(os.path.realpath(scanned_dir_destination)))
for keyword_a in keywordlist_a:
for scanned_scource_file in os.scandir(dir_source_files):
print(f"TASK 3: comparing keyword '{keyword_a}' to '{scanned_scource_file}'")
if keyword_a.lower() in str(scanned_scource_file).lower():
print(f"Bingo! '{keyword_a}' found in '{scanned_scource_file}'")
try:
shutil.move(os.path.join(scanned_scource_file.path, scanned_scource_file), scanned_dir_destination)
except:
pass
print("\n")
create_folders_and_move_files()
Output:
.txt file found in dir_keyword_files:
<DirEntry 'audiobooks.txt'> name of new folder: audiobooks
directory creation succesful. created directory:audiobooks
.txt file found in dir_keyword_files:
<DirEntry 'movies.txt'> name of new folder: movies
directory creation succesful. created directory:movies
TASK 1: iterating through parent txt-keywordfile:
<DirEntry 'audiobooks.txt'>
TASK 2: fill child keywordlist_a with keywords from parent:
keyword added to keywordlist_a: audiobook
keyword added to keywordlist_a: Hörbuch
keyword added to keywordlist_a: podcast
keywordlist_a ready:
['audiobook ', 'Hörbuch ', 'podcast']
This is the destination path C:\Users\Desktop\Desktop\Python\testing\file organizer\destination\audiobooks
TASK 3: comparing keyword 'audiobook ' to '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
Bingo! 'audiobook ' found in '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'audiobook ' to '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
Bingo! 'audiobook ' found in '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'Hörbuch ' to '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'Hörbuch ' to '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'podcast' to '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'podcast' to '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
This is the destination path C:\Users\Desktop\Desktop\Python\testing\file organizer\destination\movies
TASK 3: comparing keyword 'audiobook ' to '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
Bingo! 'audiobook ' found in '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'audiobook ' to '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
Bingo! 'audiobook ' found in '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'Hörbuch ' to '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'Hörbuch ' to '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'podcast' to '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'podcast' to '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
TASK 1: iterating through parent txt-keywordfile:
<DirEntry 'movies.txt'>
TASK 2: fill child keywordlist_a with keywords from parent:
keyword added to keywordlist_a: movie
keyword added to keywordlist_a: full film
keyword added to keywordlist_a: star wars
keywordlist_a ready:
['movie ', 'full film ', 'star wars']
This is the destination path C:\Users\Desktop\Desktop\Python\testing\file organizer\destination\audiobooks
TASK 3: comparing keyword 'movie ' to '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'movie ' to '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'full film ' to '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'full film ' to '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'star wars' to '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'star wars' to '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
This is the destination path C:\Users\Desktop\Desktop\Python\testing\file organizer\destination\movies
TASK 3: comparing keyword 'movie ' to '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'movie ' to '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'full film ' to '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'full film ' to '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'star wars' to '<DirEntry 'How to Read People Like a Book -James W. Williams -Full Audiobook (192kbit_AAC).m4a.txt'>'
TASK 3: comparing keyword 'star wars' to '<DirEntry 'How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'>'
| [
"Change the appropriate line to:\nshutil.move(scanned_scource_file, scanned_dir_destination)\n\nThe reason why your code doesn't work was failing shutil.move() because of not valid passed parameter raising the error:\n# NotADirectoryError: [Errno 20] Not a directory: 'source files/How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt/source files/How to Talk to Anyone 92 Little Tricks for Big Success in Relationships Audiobook (128kbit_AAC).m4a.txt'\n\nThe core of the problem you were facing was the wrong expectation that scanned_source_file.path will provide the directories part without the base file name, but it does provide the entire full path file name instead.\nNotice that a change of the line to:\nshutil.move(scanned_scource_file.path, scanned_dir_destination)\n\nwould do the same as\nshutil.move(scanned_scource_file, scanned_dir_destination)\n\nwhere the first version provides the full path file name as a string and the other one as an equivalent object.\nBy the way: use scanned_scource_file.name instead of str(scanned_scource_file).\n"
] | [
1
] | [] | [] | [
"file_move",
"operating_system",
"python_3.x",
"shutil"
] | stackoverflow_0074674733_file_move_operating_system_python_3.x_shutil.txt |
Q:
How can I make a program executable from everywhere in a MACOS Monterey 12.01
I want to make nextflow https://www.nextflow.io/ or any other program executable from every directory in my Mac terminal.
I have done my research which says find the ~/.bashrc file and write in there
export PATH=$PATH:</path/to/file/>
In my macOS Monterey 12.01 there is no ~/.bashrc file but a ~/.zshrc file. Shall I add the command to the ~/.zshrc, or do I have to create a ~/.bashrc? If I have to create a ~/.bashrc, how do I do this?
Sorry for the basic question
A:
In the past, you had to decide which of the shell types you wanted to use. Your tutorial may already be outdated, because the change of the default shell has already happened. Now it should be the ~/.zshrc or maybe the ~/.zshenv file that does what you want.
On my macOS Monterey 12.1 Beta the zshenv file is used to set up global things
A:
Why don't you add it to the ~/.zshrc and see if it works? My guess is that it does the job.
A:
Do as Gordon suggested. Add your program into your PATH export PATH="$PATH":</path/to/directory>. Restart terminal, and then you ll be able to execute command from anywhere
 | How can I make a program executable from everywhere in a MACOS Monterey 12.01 | I want to make nextflow https://www.nextflow.io/ or any other program executable from every directory in my Mac terminal.
I have done my research which says find the ~/.bashrc file and write in there
export PATH=$PATH:</path/to/file/>
In my macOS Monterey 12.01 there is no ~/.bashrc file but a ~/.zshrc file. Shall I add the command to the ~/.zshrc, or do I have to create a ~/.bashrc? If I have to create a ~/.bashrc, how do I do this?
Sorry for the basic question
| [
"In the past, you had to decide, which of the terminal types you want to choose. Your tutorial is maybe already deprecated, because the change of the terminal system already appeared. Now it should be the ~/.zshrc or maybe the ~/.zshenv file, that does what you want.\nOn my MacOS Monterey 12.1 Beta the zshenv file is used to setup global things\n",
"Why don't you add it to the ~/.zshrc and see if it works? My guess is that it does the job.\n",
"Do as Gordon suggested. Add your program into your PATH export PATH=\"$PATH\":</path/to/directory>. Restart terminal, and then you ll be able to execute command from anywhere\n"
] | [
3,
2,
1
] | [] | [] | [
"command_line",
"macos"
] | stackoverflow_0070146902_command_line_macos.txt |
Q:
Sum by Factors From Codewars.com
Synopsis: my code runs well with simple lists, but when I attempt the kata, after the 4 basic tests its execution gets timed out.
Since I don't want to look up other people's solutions, I'm asking for help so that someone can show me which part of the code is messing with the execution time, in order to focus only on modifying that part.
Note: I don't want a final solution, just to know which part of the code I have to change, please.
Exercise:
Given an array of positive or negative integers
I= [i1,..,in]
you have to produce a sorted array P of the form
[ [p, sum of all ij of I for which p is a prime factor (p positive) of ij] ...]
P will be sorted by increasing order of the prime numbers. The final result has to be given as a string in Java, C# or C++ and as an array of arrays in other languages.
Example:
I = [12, 15] # result = [[2, 12], [3, 27], [5, 15]]
[2, 3, 5] is the list of all prime factors of the elements of I, hence the result.
Notes: It can happen that a sum is 0 if some numbers are negative!
Example: I = [15, 30, -45] 5 divides 15, 30 and (-45) so 5 appears in the result, the sum of the numbers for which 5 is a factor is 0 so we have [5, 0] in the result amongst others.
`
def sum_for_list(lst):
if len(lst) == 0:
return []
max = sorted(list(map(lambda x: abs(x), lst)), reverse = True)[0]
#create the list with the primes, already filtered
primes = []
for i in range (2, max + 1):
for j in range (2, i):
if i % j == 0:
break
else:
for x in lst:
if x % i == 0:
primes.append([i])
break
#i add the sums to the primes
for i in primes:
sum = 0
for j in lst:
if j % i[0] == 0:
sum += j
i.append(sum)
return primes
`
Image
I tried to simplify the code as much as I could, but got the same result.
I also tried other ways to iterate in the first step:
# Find the maximum value in the list
from functools import reduce
max = reduce(lambda x,y: abs(x) if abs(x)>abs(y) else abs(y), lst)
A:
One possible cause of timeouts in your code is the use of the sorted function with the reverse = True argument. This sorts the input list in reverse order, which can be inefficient for large lists.
Instead of sorting the list in reverse order, you can use the built-in max function to find the maximum value in the list. This will avoid the need to sort the entire list, which can improve the performance of your code.
Here is an example of how you could modify your code to use the max function instead of sorting the list:
import math
def sum_for_list(lst):
if len(lst) == 0:
return []
# Find the greatest common divisor of all numbers in the list
maxItem = abs(lst[0])
for i in range(1, len(lst)):
maxItem = math.gcd(maxItem, abs(lst[i]))
# Create the list with the primes, already filtered
primes = []
for i in range (2, maxItem + 1):
for j in range (2, i):
if i % j == 0:
break
else:
for x in lst:
if x % i == 0:
primes.append(i)
break
# Add the sums to the primes
sums = []
for i in primes:
sum = 0
for j in lst:
if j % i == 0:
sum += j
sums.append(sum)
return sums
This code should be more efficient than the original code, and should be able to run without timing out on larger input lists. However, it is still not optimized for large lists, and you may need to consider further improvements if you need to handle very large inputs.
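To make the max suggestion above concrete, here is a small illustrative sketch (not part of the original answer; the function name prime_factor_sums is made up). It takes the bound with the built-in max over absolute values instead of sorting, and returns the [prime, sum] pairs the kata expects:
def prime_factor_sums(lst):
    if not lst:
        return []
    upper = max(abs(x) for x in lst)  # largest absolute value, no sorting needed
    result = []
    for p in range(2, upper + 1):
        # trial-division primality test up to sqrt(p)
        if all(p % d != 0 for d in range(2, int(p ** 0.5) + 1)):
            divisible = [x for x in lst if x % p == 0]
            if divisible:
                result.append([p, sum(divisible)])
    return result

print(prime_factor_sums([12, 15]))       # [[2, 12], [3, 27], [5, 15]]
print(prime_factor_sums([15, 30, -45]))  # [[2, 30], [3, 0], [5, 0]]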
 | Sum by Factors From Codewars.com | Synopsis: my code runs well with simple lists, but when I attempt the kata, after the 4 basic tests its execution gets timed out.
Since I don't want to look up other people's solutions, I'm asking for help so that someone can show me which part of the code is messing with the execution time, in order to focus only on modifying that part.
Note: I don't want a final solution, just to know which part of the code I have to change, please.
Exercise:
Given an array of positive or negative integers
I= [i1,..,in]
you have to produce a sorted array P of the form
[ [p, sum of all ij of I for which p is a prime factor (p positive) of ij] ...]
P will be sorted by increasing order of the prime numbers. The final result has to be given as a string in Java, C# or C++ and as an array of arrays in other languages.
Example:
I = [12, 15] # result = [[2, 12], [3, 27], [5, 15]]
[2, 3, 5] is the list of all prime factors of the elements of I, hence the result.
Notes: It can happen that a sum is 0 if some numbers are negative!
Example: I = [15, 30, -45] 5 divides 15, 30 and (-45) so 5 appears in the result, the sum of the numbers for which 5 is a factor is 0 so we have [5, 0] in the result amongst others.
`
def sum_for_list(lst):
if len(lst) == 0:
return []
max = sorted(list(map(lambda x: abs(x), lst)), reverse = True)[0]
#create the list with the primes, already filtered
primes = []
for i in range (2, max + 1):
for j in range (2, i):
if i % j == 0:
break
else:
for x in lst:
if x % i == 0:
primes.append([i])
break
#i add the sums to the primes
for i in primes:
sum = 0
for j in lst:
if j % i[0] == 0:
sum += j
i.append(sum)
return primes
`
Image
I tried to simplify the code as much as I could, but got the same result.
I also tried other ways to iterate in the first step:
# Find the maximum value in the list
from functools import reduce
max = reduce(lambda x,y: abs(x) if abs(x)>abs(y) else abs(y), lst)
| [
"One possible cause of timeouts in your code is the use of the sorted function with the reverse = True argument. This sorts the input list in reverse order, which can be inefficient for large lists.\nInstead of sorting the list in reverse order, you can use the built-in max function to find the maximum value in the list. This will avoid the need to sort the entire list, which can improve the performance of your code.\nHere is an example of how you could modify your code to use the max function instead of sorting the list:\nimport math\n\ndef sum_for_list(lst):\n if len(lst) == 0:\n return []\n\n # Find the greatest common divisor of all numbers in the list\n maxItem = abs(lst[0])\n for i in range(1, len(lst)):\n maxItem = math.gcd(maxItem, abs(lst[i]))\n\n # Create the list with the primes, already filtered\n primes = []\n for i in range (2, maxItem + 1): \n for j in range (2, i): \n if i % j == 0: \n break \n else:\n for x in lst:\n if x % i == 0: \n primes.append(i)\n break\n\n # Add the sums to the primes\n sums = []\n for i in primes:\n sum = 0\n for j in lst:\n if j % i == 0:\n sum += j\n sums.append(sum)\n\n return sums\n\nThis code should be more efficient than the original code, and should be able to run without timing out on larger input lists. However, it is still not optimized for large lists, and you may need to consider further improvements if you need to handle very large inputs.\n"
] | [
0
] | [] | [] | [
"performance",
"python",
"time"
] | stackoverflow_0074675160_performance_python_time.txt |
Q:
Python Selenium with Salesforce - Cannot Seem to Access Certain Form Elements
Using Selenium to try and automate a bit of data entry with Salesforce. I have gotten my script to load a webpage, allow me to login, and click an "edit" button.
My next step is to enter data into a field. However, I keep getting an error about the field not being found. I've tried to identify it by XPATH, NAME, and ID and continue to get the error. For reference, my script works with a simple webpage like Google. I have a feeling that clicking the edit button in Salesforce opens either another window or frame (sorry if I'm using the wrong terminology). Things I've tried:
Looking for other frames (can't seem to find any in the HTML)
Having my script wait until the element is present (doesn't seem to work)
Any other options? Thank you!
A:
Salesforce's Lightning Experience (the new white-blue UI) is built with web components that hide their internal implementation details. You'd need to read up a bit about "shadow DOM", it's not a "happy soup" of html and JS all chucked into top page's html. Means that CSS is limited to that one component, there's no risk of spilling over or overwriting another page area's JS function if you both declare function with same name - but it also means it's much harder to get into element's internals.
You'll have to read up about how Selenium deals with Shadow DOM. Some companies claim they have working Lightning UI automated tests. Heard good stuff about Provar, haven't used it myself.
For custom UI components SF developer has option to use "light dom", for standard UI you'll struggle a bit. If you're looking for some automation without fighting with Lightning Experience (especially that with 3 releases/year SF sometimes changes the structure of generated html, breaking old tests) - you could consider switching over to classic UI for the test? It'll be more accessible for Selenium, won't be exactly same thing the user does - but server-side errors like required fields, validation rules should fire all the same.
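To illustrate the Shadow DOM point (this sketch is not from the answer; the URL and selectors are placeholders, and it assumes Selenium 4 with a recent Chromium-based browser): a web component's shadow root can be reached through the element's shadow_root property and then searched with CSS selectors:
from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()
driver.get("https://example.my.salesforce.com")  # placeholder URL

# Locate the host web component, then search inside its shadow DOM.
host = driver.find_element(By.CSS_SELECTOR, "lightning-input")  # hypothetical host tag
shadow = host.shadow_root                              # Selenium 4+ handle to the shadow root
field = shadow.find_element(By.CSS_SELECTOR, "input")  # only CSS selectors work here, not XPath
field.send_keys("some value")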
| Python Selenium with Salesforce - Cannot Seem to Access Certain Form Elements | Using Selenium to try and automate a bit of data entry with Salesforce. I have gotten my script to load a webpage, allow me to login, and click an "edit" button.
My next step is to enter data into a field. However, I keep getting an error about the field not being found. I've tried to identify it by XPATH, NAME, and ID and continue to get the error. For reference, my script works with a simple webpage like Google. I have a feeling that clicking the edit button in Salesforce opens either another window or frame (sorry if I'm using the wrong terminology). Things I've tried:
Looking for other frames (can't seem to find any in the HTML)
Having my script wait until the element is present (doesn't seem to work)
Any other options? Thank you!
| [
"Salesforce's Lighting Experience (the new white-blue UI) is built with web components that hide their internal implementation details. You'd need to read up a bit about \"shadow DOM\", it's not a \"happy soup\" of html and JS all chucked into top page's html. Means that CSS is limited to that one component, there's no risk of spilling over or overwriting another page area's JS function if you both declare function with same name - but it also means it's much harder to get into element's internals.\nYou'll have to read up about how Selenium deals with Shadow DOM. Some companies claim they have working Lightning UI automated tests/ Heard good stuff about Provar, haven't used it myself.\nFor custom UI components SF developer has option to use \"light dom\", for standard UI you'll struggle a bit. If you're looking for some automation without fighting with Lighting Experience (especially that with 3 releases/year SF sometimes changes the structure of generated html, breaking old tests) - you could consider switching over to classic UI for the test? It'll be more accessible for Selenium, won't be exactly same thing the user does - but server-side errors like required fields, validation rules should fire all the same.\n"
] | [
0
] | [] | [] | [
"frames",
"html",
"python",
"salesforce",
"selenium"
] | stackoverflow_0074674569_frames_html_python_salesforce_selenium.txt |
Q:
ModuleNotFoundError: No module named 'translate' , even after "pip install translate"
I am having this error, even after "pip install translate" multiple times.
I am running my application in a docker container. I am a beginner, so please let me know what mistake I am making.
`
Traceback (most recent call last):
File "/usr/lib/python3.10/multiprocessing/process.py", line 314, in _bootstrap
self.run()
File "/usr/lib/python3.10/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/lib/python3.10/dist-packages/uvicorn/_subprocess.py", line 76, in subprocess_started
target(sockets=sockets)
File "/usr/local/lib/python3.10/dist-packages/uvicorn/server.py", line 60, in run
return asyncio.run(self.serve(sockets=sockets))
File "/usr/lib/python3.10/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/usr/lib/python3.10/asyncio/base_events.py", line 646, in run_until_complete
return future.result()
File "/usr/local/lib/python3.10/dist-packages/uvicorn/server.py", line 67, in serve
config.load()
File "/usr/local/lib/python3.10/dist-packages/uvicorn/config.py", line 477, in load
self.loaded_app = import_from_string(self.app)
File "/usr/local/lib/python3.10/dist-packages/uvicorn/importer.py", line 24, in import_from_string
raise exc from None
File "/usr/local/lib/python3.10/dist-packages/uvicorn/importer.py", line 21, in import_from_string
module = importlib.import_module(module_str)
File "/usr/lib/python3.10/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1050, in _gcd_import
File "<frozen importlib._bootstrap>", line 1027, in _find_and_load
File "<frozen importlib._bootstrap>", line 1006, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 688, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 883, in exec_module
File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
File "/usr/./main.py", line 8, in <module>
from translate import Translator
ModuleNotFoundError: No module named 'translate'
when I am running "python3 -m pip install translate"
Requirement already satisfied: translate in /root/anaconda3/envs/newenvt1/lib/python3.9/site-packages (3.6.1)
Requirement already satisfied: click in /root/anaconda3/envs/newenvt1/lib/python3.9/site-packages (from translate) (8.0.4)
Requirement already satisfied: libretranslatepy==2.1.1 in /root/anaconda3/envs/newenvt1/lib/python3.9/site-packages (from translate) (2.1.1)
Requirement already satisfied: lxml in /root/anaconda3/envs/newenvt1/lib/python3.9/site-packages (from translate) (4.9.1)
Requirement already satisfied: requests in /root/anaconda3/envs/newenvt1/lib/python3.9/site-packages (from translate) (2.28.1)
Requirement already satisfied: charset-normalizer<3,>=2 in /root/anaconda3/envs/newenvt1/lib/python3.9/site-packages (from requests->translate) (2.0.4)
Requirement already satisfied: idna<4,>=2.5 in /root/anaconda3/envs/newenvt1/lib/python3.9/site-packages (from requests->translate) (2.10)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /root/anaconda3/envs/newenvt1/lib/python3.9/site-packages (from requests->translate) (1.26.12)
Requirement already satisfied: certifi>=2017.4.17 in /root/anaconda3/envs/newenvt1/lib/python3.9/site-packages (from requests->translate) (2022.9.24)
WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
`
When I did "pip list", translate is there but still this error is coming.
A:
You don't need the local pip install; you need to install the package inside your Docker image.
Add to your Dockerfile:
python3 -m pip install translate
And rebuild your image
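For example, the install belongs in the Dockerfile as a RUN instruction; a rough sketch (the base image and file layout here are assumptions, not taken from the question):
# Dockerfile sketch
FROM python:3.10-slim
WORKDIR /app
COPY . /app
RUN python3 -m pip install --no-cache-dir translate
After editing the Dockerfile, rebuild the image (for example with docker build) and recreate the container, so the Python interpreter inside the container actually has the package.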
| ModuleNotFoundError: No module named 'translate' , even after "pip install translate" | I am having this error, even after "pip install translate" multiple times.
I am running my application in a docker container. I am a beginner, so please let me know what mistake I am making.
`
Traceback (most recent call last):
File "/usr/lib/python3.10/multiprocessing/process.py", line 314, in _bootstrap
self.run()
File "/usr/lib/python3.10/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/lib/python3.10/dist-packages/uvicorn/_subprocess.py", line 76, in subprocess_started
target(sockets=sockets)
File "/usr/local/lib/python3.10/dist-packages/uvicorn/server.py", line 60, in run
return asyncio.run(self.serve(sockets=sockets))
File "/usr/lib/python3.10/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/usr/lib/python3.10/asyncio/base_events.py", line 646, in run_until_complete
return future.result()
File "/usr/local/lib/python3.10/dist-packages/uvicorn/server.py", line 67, in serve
config.load()
File "/usr/local/lib/python3.10/dist-packages/uvicorn/config.py", line 477, in load
self.loaded_app = import_from_string(self.app)
File "/usr/local/lib/python3.10/dist-packages/uvicorn/importer.py", line 24, in import_from_string
raise exc from None
File "/usr/local/lib/python3.10/dist-packages/uvicorn/importer.py", line 21, in import_from_string
module = importlib.import_module(module_str)
File "/usr/lib/python3.10/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1050, in _gcd_import
File "<frozen importlib._bootstrap>", line 1027, in _find_and_load
File "<frozen importlib._bootstrap>", line 1006, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 688, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 883, in exec_module
File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
File "/usr/./main.py", line 8, in <module>
from translate import Translator
ModuleNotFoundError: No module named 'translate'
when I am running "python3 -m pip install translate"
Requirement already satisfied: translate in /root/anaconda3/envs/newenvt1/lib/python3.9/site-packages (3.6.1)
Requirement already satisfied: click in /root/anaconda3/envs/newenvt1/lib/python3.9/site-packages (from translate) (8.0.4)
Requirement already satisfied: libretranslatepy==2.1.1 in /root/anaconda3/envs/newenvt1/lib/python3.9/site-packages (from translate) (2.1.1)
Requirement already satisfied: lxml in /root/anaconda3/envs/newenvt1/lib/python3.9/site-packages (from translate) (4.9.1)
Requirement already satisfied: requests in /root/anaconda3/envs/newenvt1/lib/python3.9/site-packages (from translate) (2.28.1)
Requirement already satisfied: charset-normalizer<3,>=2 in /root/anaconda3/envs/newenvt1/lib/python3.9/site-packages (from requests->translate) (2.0.4)
Requirement already satisfied: idna<4,>=2.5 in /root/anaconda3/envs/newenvt1/lib/python3.9/site-packages (from requests->translate) (2.10)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /root/anaconda3/envs/newenvt1/lib/python3.9/site-packages (from requests->translate) (1.26.12)
Requirement already satisfied: certifi>=2017.4.17 in /root/anaconda3/envs/newenvt1/lib/python3.9/site-packages (from requests->translate) (2022.9.24)
WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
`
When I did "pip list", translate is there but still this error is coming.
| [
"You need not local pip install, but install in your docker.\nAdd to Dockerfile\npython3 -m pip install translate\n\nAnd rebuild your image\n"
] | [
0
] | [] | [] | [
"docker",
"fastapi",
"pip",
"python",
"uvicorn"
] | stackoverflow_0074674226_docker_fastapi_pip_python_uvicorn.txt |
Q:
Why getting this Error selenium.common.exceptions.StaleElementReferenceException:
I know answers have already been posted to this same question, but I tried them and they are not working for me, because there have also been some updates in the Selenium code.
selenium.common.exceptions.StaleElementReferenceException: Message: stale element reference: element is not attached to the page document
(Session info: chrome=108.0.5359.95)
This happens when trying to send my search keyword to the input labeled "Skills Search" in the advanced search pop-up form.
Here is the URL: https://www.upwork.com/nx/jobs/search/modals/advanced-search?sort=recency&pageTitle=Advanced%20Search&_navType=modal&_modalInfo=%5B%7B%22navType%22%3A%22modal%22,%22title%22%3A%22Advanced%20Search%22,%22modalId%22%3A%221670133126002%22,%22channelName%22%3A%22advanced-search-modal%22%7D%5D
Here is my code:
import time
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
options = Options()
options.add_argument("start-maximized")
webdriver_service = Service('F:\\work\\chromedriver_win32\\chromedriver.exe')
driver = webdriver.Chrome(options=options, service=webdriver_service)
wait = WebDriverWait(driver, 10)
url = "https://www.upwork.com/nx/jobs/search/?sort=recency"
driver.get(url)
key = ["Web Scraping","Selenium WebDriver", "Data Scraping", "selenium", "Web Crawling", "Beautiful Soup", "Scrapy", "Data Extraction", "Automation"]
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'button#onetrust-accept-btn-handler')))
time.sleep(5)
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'button#onetrust-accept-btn-handler'))).click()
for i in range(len(key)):
wait.until(EC.element_to_be_clickable((By.XPATH, '//button[contains(@title,"Advanced Search")]'))).click()
time.sleep(5)
advanced_search_input = driver.find_element(By.XPATH,'//input[contains(@aria-labelledby,"tokenizer-label")]')
# advanced_search_input.click()
advanced_search_input.send_keys(key[i])
The result I am getting now:
A:
When the '//input[contains(@aria-labelledby,"tokenizer-label")]' element is clicked, it is re-built on the page (a really strange approach to how they built that page).
To make this code work, I added a delay after clearing and clicking that input, and then I get that element again.
The following code worked for me:
import time
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
options = Options()
options.add_argument("start-maximized")
webdriver_service = Service('C:\webdrivers\chromedriver.exe')
driver = webdriver.Chrome(options=options, service=webdriver_service)
wait = WebDriverWait(driver, 10)
url = "https://www.upwork.com/nx/jobs/search/?sort=recency"
driver.get(url)
keys = ["Web Scraping","Selenium WebDriver", "Data Scraping", "selenium", "Web Crawling", "Beautiful Soup", "Scrapy", "Data Extraction", "Automation"]
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'button#onetrust-accept-btn-handler')))
time.sleep(5)
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'button#onetrust-accept-btn-handler'))).click()
for i in range(len(keys)):
wait.until(EC.element_to_be_clickable((By.XPATH, '//button[contains(@title,"Advanced Search")]'))).click()
wait.until(EC.element_to_be_clickable((By.XPATH,'//input[contains(@aria-labelledby,"tokenizer-label")]'))).clear()
wait.until(EC.element_to_be_clickable((By.XPATH, '//input[contains(@aria-labelledby,"tokenizer-label")]'))).click()
time.sleep(3)
wait.until(EC.element_to_be_clickable((By.XPATH, '//input[contains(@aria-labelledby,"tokenizer-label")]'))).send_keys(keys[i])
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR,'[data-test="modal-advanced-search-search-btn"]'))).click()
UPD
In order to select multiple search values you need to insert each value, select the appearing autocomplete option and continue, as in the code below:
import time
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
options = Options()
options.add_argument("start-maximized")
webdriver_service = Service('C:\webdrivers\chromedriver.exe')
driver = webdriver.Chrome(options=options, service=webdriver_service)
wait = WebDriverWait(driver, 10)
url = "https://www.upwork.com/nx/jobs/search/?sort=recency"
driver.get(url)
keys = ["Web Scraping", "Selenium WebDriver", "Data Scraping", "Selenium", "Beautiful Soup", "Scrapy", "Data Extraction", "Automation"] #
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'button#onetrust-accept-btn-handler')))
time.sleep(5)
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'button#onetrust-accept-btn-handler'))).click()
wait.until(EC.element_to_be_clickable((By.XPATH, '//button[contains(@title,"Advanced Search")]'))).click()
wait.until(EC.element_to_be_clickable((By.XPATH,'//input[contains(@aria-labelledby,"tokenizer-label")]'))).clear()
wait.until(EC.element_to_be_clickable((By.XPATH, '//input[contains(@aria-labelledby,"tokenizer-label")]'))).click()
time.sleep(3)
for i in range(len(keys)):
wait.until(EC.element_to_be_clickable((By.XPATH, '//input[contains(@aria-labelledby,"tokenizer-label")]'))).send_keys(keys[i])
time.sleep(2)
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "#typeahead-input-control-35 .up-menu-item-text"))).click()
time.sleep(4)
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR,'[data-test="modal-advanced-search-search-btn"]'))).click()
UPD
Finally did it!
The problem with the wrong inputs was caused by the too slow response time of that page.
To make it work, I inserted a small delay between inserting each character of the input string. In this case the result is as expected.
This is the final working code:
import time
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
options = Options()
options.add_argument("start-maximized")
webdriver_service = Service('C:\webdrivers\chromedriver.exe')
driver = webdriver.Chrome(options=options, service=webdriver_service)
wait = WebDriverWait(driver, 10)
url = "https://www.upwork.com/nx/jobs/search/?sort=recency"
driver.get(url)
keys = ["Web Scraping", "Selenium Webdriver", "Data Scraping", "Selenium", "Beautiful Soup", "Scrapy", "Data Extraction", "Automation"]
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'button#onetrust-accept-btn-handler')))
time.sleep(5)
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'button#onetrust-accept-btn-handler'))).click()
wait.until(EC.element_to_be_clickable((By.XPATH, '//button[contains(@title,"Advanced Search")]'))).click()
wait.until(EC.element_to_be_clickable((By.XPATH,'//input[contains(@aria-labelledby,"tokenizer-label")]'))).clear()
wait.until(EC.element_to_be_clickable((By.XPATH, '//input[contains(@aria-labelledby,"tokenizer-label")]'))).click()
time.sleep(3)
for i in range(len(keys)):
search_field = wait.until(EC.element_to_be_clickable((By.XPATH, '//input[contains(@aria-labelledby,"tokenizer-label")]')))
search_field.click()
for character in keys[i]:
search_field.send_keys(character)
time.sleep(0.05)
time.sleep(2)
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "#typeahead-input-control-35 .up-menu-item-text"))).click()
time.sleep(2)
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR,'[data-test="modal-advanced-search-search-btn"]'))).click()
The result is
 | Why getting this Error selenium.common.exceptions.StaleElementReferenceException: | I know answers have already been posted to this same question, but I tried them and they are not working for me, because there have also been some updates in the Selenium code.
selenium.common.exceptions.StaleElementReferenceException: Message: stale element reference: element is not attached to the page document
(Session info: chrome=108.0.5359.95)
This happens when trying to send my search keyword to the input labeled "Skills Search" in the advanced search pop-up form.
Here is the URL: https://www.upwork.com/nx/jobs/search/modals/advanced-search?sort=recency&pageTitle=Advanced%20Search&_navType=modal&_modalInfo=%5B%7B%22navType%22%3A%22modal%22,%22title%22%3A%22Advanced%20Search%22,%22modalId%22%3A%221670133126002%22,%22channelName%22%3A%22advanced-search-modal%22%7D%5D
Here is my code:
import time
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
options = Options()
options.add_argument("start-maximized")
webdriver_service = Service('F:\\work\\chromedriver_win32\\chromedriver.exe')
driver = webdriver.Chrome(options=options, service=webdriver_service)
wait = WebDriverWait(driver, 10)
url = "https://www.upwork.com/nx/jobs/search/?sort=recency"
driver.get(url)
key = ["Web Scraping","Selenium WebDriver", "Data Scraping", "selenium", "Web Crawling", "Beautiful Soup", "Scrapy", "Data Extraction", "Automation"]
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'button#onetrust-accept-btn-handler')))
time.sleep(5)
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'button#onetrust-accept-btn-handler'))).click()
for i in range(len(key)):
wait.until(EC.element_to_be_clickable((By.XPATH, '//button[contains(@title,"Advanced Search")]'))).click()
time.sleep(5)
advanced_search_input = driver.find_element(By.XPATH,'//input[contains(@aria-labelledby,"tokenizer-label")]')
# advanced_search_input.click()
advanced_search_input.send_keys(key[i])
The result I am getting now:
| [
"By clicking '//input[contains(@aria-labelledby,\"tokenizer-label\")]' element it is re-built on the page (really strange approach they built that page).\nTo make this code working I added a delay after clearing and clicking that input and then get that element again.\nThe following code worked for me:\nimport time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\n\noptions = Options()\noptions.add_argument(\"start-maximized\")\n\nwebdriver_service = Service('C:\\webdrivers\\chromedriver.exe')\ndriver = webdriver.Chrome(options=options, service=webdriver_service)\nwait = WebDriverWait(driver, 10)\n\nurl = \"https://www.upwork.com/nx/jobs/search/?sort=recency\"\ndriver.get(url)\n\nkeys = [\"Web Scraping\",\"Selenium WebDriver\", \"Data Scraping\", \"selenium\", \"Web Crawling\", \"Beautiful Soup\", \"Scrapy\", \"Data Extraction\", \"Automation\"]\nwait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'button#onetrust-accept-btn-handler')))\ntime.sleep(5)\nwait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'button#onetrust-accept-btn-handler'))).click()\nfor i in range(len(keys)):\n wait.until(EC.element_to_be_clickable((By.XPATH, '//button[contains(@title,\"Advanced Search\")]'))).click()\n wait.until(EC.element_to_be_clickable((By.XPATH,'//input[contains(@aria-labelledby,\"tokenizer-label\")]'))).clear()\n wait.until(EC.element_to_be_clickable((By.XPATH, '//input[contains(@aria-labelledby,\"tokenizer-label\")]'))).click()\n time.sleep(3)\n wait.until(EC.element_to_be_clickable((By.XPATH, '//input[contains(@aria-labelledby,\"tokenizer-label\")]'))).send_keys(keys[i])\n wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR,'[data-test=\"modal-advanced-search-search-btn\"]'))).click()\n\nUPD\nIn order to select multiple search values you need to insert each value, select the appearing autocomplete option and continue, as in the code below:\nimport time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\n\noptions = Options()\noptions.add_argument(\"start-maximized\")\n\nwebdriver_service = Service('C:\\webdrivers\\chromedriver.exe')\ndriver = webdriver.Chrome(options=options, service=webdriver_service)\nwait = WebDriverWait(driver, 10)\n\nurl = \"https://www.upwork.com/nx/jobs/search/?sort=recency\"\ndriver.get(url)\n\nkeys = [\"Web Scraping\", \"Selenium WebDriver\", \"Data Scraping\", \"Selenium\", \"Beautiful Soup\", \"Scrapy\", \"Data Extraction\", \"Automation\"] #\nwait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'button#onetrust-accept-btn-handler')))\ntime.sleep(5)\nwait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'button#onetrust-accept-btn-handler'))).click()\nwait.until(EC.element_to_be_clickable((By.XPATH, '//button[contains(@title,\"Advanced Search\")]'))).click()\nwait.until(EC.element_to_be_clickable((By.XPATH,'//input[contains(@aria-labelledby,\"tokenizer-label\")]'))).clear()\nwait.until(EC.element_to_be_clickable((By.XPATH, '//input[contains(@aria-labelledby,\"tokenizer-label\")]'))).click()\ntime.sleep(3)\nfor i in range(len(keys)):\n 
wait.until(EC.element_to_be_clickable((By.XPATH, '//input[contains(@aria-labelledby,\"tokenizer-label\")]'))).send_keys(keys[i])\n time.sleep(2)\n wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, \"#typeahead-input-control-35 .up-menu-item-text\"))).click()\n time.sleep(4)\nwait.until(EC.element_to_be_clickable((By.CSS_SELECTOR,'[data-test=\"modal-advanced-search-search-btn\"]'))).click()\n\nUPD\nFinally did it!\nThe problem with wrong inputs caused by too slow response time of that page.\nTo make it working I inserted a small delay between inserting each character of the input string. In this case the result is as expected.\nThis is the final working code:\nimport time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\n\noptions = Options()\noptions.add_argument(\"start-maximized\")\n\nwebdriver_service = Service('C:\\webdrivers\\chromedriver.exe')\ndriver = webdriver.Chrome(options=options, service=webdriver_service)\nwait = WebDriverWait(driver, 10)\n\nurl = \"https://www.upwork.com/nx/jobs/search/?sort=recency\"\ndriver.get(url)\n\nkeys = [\"Web Scraping\", \"Selenium Webdriver\", \"Data Scraping\", \"Selenium\", \"Beautiful Soup\", \"Scrapy\", \"Data Extraction\", \"Automation\"]\nwait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'button#onetrust-accept-btn-handler')))\ntime.sleep(5)\nwait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'button#onetrust-accept-btn-handler'))).click()\nwait.until(EC.element_to_be_clickable((By.XPATH, '//button[contains(@title,\"Advanced Search\")]'))).click()\nwait.until(EC.element_to_be_clickable((By.XPATH,'//input[contains(@aria-labelledby,\"tokenizer-label\")]'))).clear()\nwait.until(EC.element_to_be_clickable((By.XPATH, '//input[contains(@aria-labelledby,\"tokenizer-label\")]'))).click()\ntime.sleep(3)\nfor i in range(len(keys)):\n search_field = wait.until(EC.element_to_be_clickable((By.XPATH, '//input[contains(@aria-labelledby,\"tokenizer-label\")]')))\n search_field.click()\n for character in keys[i]:\n search_field.send_keys(character)\n time.sleep(0.05)\n time.sleep(2)\n wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, \"#typeahead-input-control-35 .up-menu-item-text\"))).click()\n time.sleep(2)\nwait.until(EC.element_to_be_clickable((By.CSS_SELECTOR,'[data-test=\"modal-advanced-search-search-btn\"]'))).click()\n\nThe result is\n\n"
] | [
1
] | [] | [] | [
"python",
"selenium",
"selenium_webdriver",
"staleelementreferenceexception",
"xpath"
] | stackoverflow_0074675192_python_selenium_selenium_webdriver_staleelementreferenceexception_xpath.txt |
Q:
Inserting text from VBA UserForm textbox into part of a string
I'm new to VBA and hoping someone could help, if this might even be possible.
A date will be manually added by the user into UserForm TEXTBOX1 which will be placed at a bookmark in the document.
.Bookmarks("BOOKMARK1").Range _
.InsertBefore TEXTBOX1
I have option buttons for the user to select, which will place specific text (depending on the button selected) into the document as follows:
Private Sub OptionButton2_Click()
If Me.OptionButton2.Value = True Then
Set oRng = ActiveDocument.Bookmarks("BOOKMARK2").Range
oRng.Text = "EXAMPLE SENTENCE 1" & Chr(11) & Chr(9) & _
"EXAMPLE SENTENCE 2" & Chr(11) & _
"EXAMPLE SENTENCE 3" & vbNewLine & " "
ActiveDocument.Bookmarks.Add "BOOKMARK2", oRng
End If
End Sub
I am trying to get the date that was entered in TEXTBOX1 to appear at the end of the sentence of EXAMPLE SENTENCE 2 before the & CHR(11) &. Can anybody please help with this? Thank you!
I've tried numerous online searches to find the answer for my problem but haven't come across anything so far unfortunately.
A:
"EXAMPLE SENTENCE 2" & TEXTBOX1.Text & Chr(11)
| Inserting text from VBA UserForm textbox into part of a string | I'm new to VBA and hoping someone could help, if this might even be possible.
A date will be manually added by the user into UserForm TEXTBOX1 which will be placed at a bookmark in the document.
.Bookmarks("BOOKMARK1").Range _
.InsertBefore TEXTBOX1
I have option buttons for the user to select, which will place specific text (depending on the button selected) into the document as follows:
Private Sub OptionButton2_Click()
If Me.OptionButton2.Value = True Then
Set oRng = ActiveDocument.Bookmarks("BOOKMARK2").Range
oRng.Text = "EXAMPLE SENTENCE 1" & Chr(11) & Chr(9) & _
"EXAMPLE SENTENCE 2" & Chr(11) & _
"EXAMPLE SENTENCE 3" & vbNewLine & " "
ActiveDocument.Bookmarks.Add "BOOKMARK2", oRng
End If
End Sub
I am trying to get the date that was entered in TEXTBOX1 to appear at the end of the sentence of EXAMPLE SENTENCE 2 before the & CHR(11) &. Can anybody please help with this? Thank you!
I've tried numerous online searches to find the answer for my problem but haven't come across anything so far unfortunately.
| [
"\"EXAMPLE SENTENCE 2\" & TEXTBOX1.Text & Chr(11)\n\n"
] | [
0
] | [] | [] | [
"ms_word",
"textbox",
"userform",
"vba"
] | stackoverflow_0074674790_ms_word_textbox_userform_vba.txt |
Q:
SQLite: display a table with information about the database
They are asking me to write a query that displays the following table:
Select each table as a string
Select the number of attributes as an integer (count the number of attributes per table).
Select the number of rows using the COUNT(*) function
Use the compound-operator UNION ALL to bind these rows together.
How do I do that?
I knew that for getting the name of each table I have to use
SELECT name AS table_name
FROM sqlite_schema
WHERE type = 'table'
but the rest is a mystery to me.
Would you help me?
Thank you!!!
Here you can find the database
A:
Just had the same question, and the answer boiled down to 'manually' (looking at the schema visualisation or the aforementioned query for inputs). >_<
SELECT 'Customers' AS table_name,
13 AS number_of_attribute,
COUNT(*) AS number_of_row
FROM Customers
UNION ALL
SELECT 'Products' AS table_name,
9 AS number_of_attribute,
COUNT(*) AS number_of_row
FROM Products
UNION ALL
SELECT 'ProductLines' AS table_name,
4 AS number_of_attribute,
COUNT(*) AS number_of_row
FROM ProductLines
UNION ALL
SELECT 'Orders' AS table_name,
7 AS number_of_attribute,
COUNT(*) AS number_of_row
FROM Orders
UNION ALL
SELECT 'OrderDetails' AS table_name,
5 AS number_of_attribute,
COUNT(*) AS number_of_row
FROM OrderDetails
UNION ALL
SELECT 'Payments' AS table_name,
4 AS number_of_attribute,
COUNT(*) AS number_of_row
FROM Payments
UNION ALL
SELECT 'Employees' AS table_name,
8 AS number_of_attribute,
COUNT(*) AS number_of_row
FROM Employees
UNION ALL
SELECT 'Offices' AS table_name,
9 AS number_of_attribute,
COUNT(*) AS number_of_row
FROM Offices;
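As a side note (not part of the original answer), the attribute counts do not have to be typed by hand; a short Python sqlite3 script can produce the same table name / attribute count / row count listing:
import sqlite3

conn = sqlite3.connect("stores.db")  # placeholder file name for the linked database
cur = conn.cursor()

# sqlite_schema needs SQLite 3.33+; on older versions use sqlite_master instead
tables = [row[0] for row in cur.execute(
    "SELECT name FROM sqlite_schema WHERE type = 'table'")]

for table in tables:
    n_attrs = len(cur.execute(f'PRAGMA table_info("{table}")').fetchall())
    n_rows = cur.execute(f'SELECT COUNT(*) FROM "{table}"').fetchone()[0]
    print(table, n_attrs, n_rows)

conn.close()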
 | SQLite: display a table with information about the database | They are asking me to write a query that displays the following table:
Select each table as a string
Select the number of attributes as an integer (count the number of attributes per table).
Select the number of rows using the COUNT(*) function
Use the compound-operator UNION ALL to bind these rows together.
How do I do that?
I knew that for getting the name of each table I have to use
SELECT name AS table_name
FROM sqlite_schema
WHERE type = 'table'
but the rest is a mystery to me.
Would you help me?
Thank you!!!
Here you can find the database
| [
"Just had the same question, and the answer boiled down to 'manually' (looking at the schema visualisation or the aforementioned query for inputs). >_<\nSELECT 'Customers' AS table_name,\n13 AS number_of_attribute,\nCOUNT(*) AS number_of_row\nFROM Customers\nUNION ALL\nSELECT 'Products' AS table_name,\n9 AS number_of_attribute,\nCOUNT(*) AS number_of_row\nFROM Products\nUNION ALL\nSELECT 'ProductLines' AS table_name,\n4 AS number_of_attribute,\nCOUNT(*) AS number_of_row\nFROM ProductLines\nUNION ALL\nSELECT 'Orders' AS table_name,\n7 AS number_of_attribute,\nCOUNT(*) AS number_of_row\nFROM Orders\nUNION ALL\nSELECT 'OrderDetails' AS table_name,\n5 AS number_of_attribute,\nCOUNT(*) AS number_of_row\nFROM OrderDetails\nUNION ALL\nSELECT 'Payments' AS table_name,\n4 AS number_of_attribute,\nCOUNT(*) AS number_of_row\nFROM Payments\nUNION ALL\nSELECT 'Employees' AS table_name,\n8 AS number_of_attribute,\nCOUNT(*) AS number_of_row\nFROM Employees\nUNION ALL\nSELECT 'Offices' AS table_name,\n9 AS number_of_attribute,\nCOUNT(*) AS number_of_row\nFROM Offices;\n"
] | [
0
] | [] | [] | [
"create_table",
"database",
"sqlite"
] | stackoverflow_0069442859_create_table_database_sqlite.txt |
Q:
Visual Studio 2022 doesn't see files in project when I create ASP.NET Project
I have a problem with Visual Studio. When I try to create an ASP.NET project, Visual Studio doesn't see any files in the project. I see only No solution(0). I create the project in the same way as in this topic: Why my latest version Visual Studio 2022 does not contain any project under the solution in the Solution Explorer
I tried to reinstall Visual Studio about 6 times. I don't know where the problem is.
A:
The problem is that you are trying to use a project framework version that you haven't installed yet.
Which .NET version of ASP.NET are you trying to use? Have you installed that framework?
 | Visual Studio 2022 doesn't see files in project when I create ASP.NET Project | I have a problem with Visual Studio. When I try to create an ASP.NET project, Visual Studio doesn't see any files in the project. I see only No solution(0). I create the project in the same way as in this topic: Why my latest version Visual Studio 2022 does not contain any project under the solution in the Solution Explorer
I tried to reinstall Visual Studio about 6 times. I don't know where the problem is.
| [
"the problem is that you are trying to use a version project framework that you don't installed yet.\nWhat .net version of asp.net do you try? Are you installed this framework?\n"
] | [
0
] | [] | [] | [
"asp.net",
"c#",
"visual_studio"
] | stackoverflow_0074675017_asp.net_c#_visual_studio.txt |
Q:
What are the best practices about cognito and oauth2 resource server
In my company, we use API Gateway with AWS Cognito. Our process is to create an API, add the Swagger definition on API Gateway, and create a resource server with the desired scope. We have our app client accessing the needed scope, and that's all. For each API we create, we create one resource server with its scope.
AWS limits the number of resource servers to 25 (with a maximum of 300) within a single user pool. So in my case, I can only create 25 APIs until I get blocked.
Oauth2 definition of resource server is "The resource server is the OAuth 2.0 term for your API server" and the auth0 is "the server hosting the protected resources. This is the API you want to access". But it's not quite the case with cognito as it's "a remote server that authorizes access based on OAuth 2.0 scopes in an access token."
My question is: is my approach the right approach concerning resource servers within Cognito, or should I factor my resource servers somehow?
I am actually trying to increase the resource server quota, but AWS support is questioning my approach.
A:
Resource servers are used to manage access to your API's protected resources, and AWS Cognito allows you to create a maximum of 25 resource servers with a maximum of 300 scopes per resource server. If you are reaching the limit of 25 resource servers and need to create more, consider factoring your resource servers in some way to avoid hitting the limit. You can create a single resource server and define multiple scopes for each of your API's protected resources. So you can define access to each of your API's protected resources within a single resource server, instead of creating a separate resource server for each API.
Also you can create a hierarchy of resource servers, with a parent resource server that manages access to multiple child resource servers. This would allow you to create a larger number of resource servers while still maintaining control over access to your API's protected resources.
Really depends on your specific use case and the requirements of your API.
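As a rough illustration of the single-resource-server idea (this sketch is not from the answer; the pool ID, identifier and scope names are placeholders), one resource server with several scopes can be created with boto3:
import boto3

cognito = boto3.client("cognito-idp")

# One resource server covering several APIs, each API mapped to its own scope.
cognito.create_resource_server(
    UserPoolId="us-east-1_EXAMPLE",        # placeholder user pool ID
    Identifier="https://api.example.com",  # placeholder resource server identifier
    Name="company-apis",
    Scopes=[
        {"ScopeName": "orders.read", "ScopeDescription": "Read access to the orders API"},
        {"ScopeName": "billing.read", "ScopeDescription": "Read access to the billing API"},
    ],
)
App clients then request scopes such as https://api.example.com/orders.read, so adding an API means adding a scope rather than a new resource server.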
 | What are the best practices about cognito and oauth2 resource server | In my company, we use API Gateway with AWS Cognito. Our process is to create an API, add the Swagger definition on API Gateway, and create a resource server with the desired scope. We have our app client accessing the needed scope, and that's all. For each API we create, we create one resource server with its scope.
AWS limits the number of resource servers to 25 (with a maximum of 300) within a single user pool. So in my case, I can only create 25 APIs until I get blocked.
Oauth2 definition of resource server is "The resource server is the OAuth 2.0 term for your API server" and the auth0 is "the server hosting the protected resources. This is the API you want to access". But it's not quite the case with cognito as it's "a remote server that authorizes access based on OAuth 2.0 scopes in an access token."
My question is : is my approach the right approach concerning the resource server within cognito ? or should i factorize my resource server somehow ?
I am actually trying to increase resource server quota but the AWS support questions is questionning my approach
| [
"Resource servers are used to manage access to your API's protected resources, and AWS Cognito allows you to create a maximum of 25 resource servers with a maximum of 300 scopes per resource server. If you are reaching the limit of 25 resource servers and need to create more, consider factoring your resource servers in some way to avoid hitting the limit. You can create a single resource server and define multiple scopes for each of your API's protected resources. So you can define access to each of your API's protected resources within a single resource server, instead of creating a separate resource server for each API.\nAlso you can create a hierarchy of resource servers, with a parent resource server that manages access to multiple child resource servers. This would allow you to create a larger number of resource servers while still maintaining control over access to your API's protected resources.\nReally depends on your specific use case and the requirements of your API.\n"
] | [
0
] | [] | [] | [
"amazon_cognito",
"amazon_web_services",
"oauth",
"oauth2resourceserver"
] | stackoverflow_0074675168_amazon_cognito_amazon_web_services_oauth_oauth2resourceserver.txt |
Q:
Compare constant equals object or object equal constant
Following Java best-java-coding-practices.htm, they say we need to call .equals on known string constants rather than on an UNKNOWN variable:
String string = new Test().getString();
// always compare like this, this will never throw NPE
System.out.println("CONSTANT.equals(string):"+CONSTANT.equals(string));
System.out.println("Comparision like string.equals(CONSTANT) may throw NullPointerException");
// next statement will throw NPE
System.out.println("string.equals(CONSTANT):"+string.equals(CONSTANT));
So what about a KNOWN variable? Should we still compare this way or not?
For example, suppose I receive an object from the server and the server guarantees that this object is never null.
In the case where I want to compare this object with a constant:
// CON: it may throw an NPE if the server returns null (i.e. the server did something wrong) => app crash
// PRO: when we read this code, we have the mindset that the object is never null; if it is null, it is a server bug
object.equals(CONSTANT)
// CON: when we read this code, we never know why or when object == null, so it is confusing.
// It does not throw an NPE, so the code keeps running and we may end up with UI or logic problems
// PRO: it never throws an NPE
CONSTANT.equals(object)
Any suggestions would be much appreciated. For me, I prefer object.equals(CONSTANT) for a known variable, but my team does not.
UPDATE: I think
CONSTANT.equals(object)
is similar to
try {
    object.equals(CONSTANT);
} catch (NullPointerException ex) {
    // don't handle or explain anything
}
A:
The practice of reversing the terms around the equality operator when one of the terms is a constant is called a Yoda conditional. You might encounter it in the following forms:
if( constant == variable ) instead of if ( variable == constant )
if( constant.equals( variable ) ) instead of if( variable.equals( constant ) )
Do not use Yoda conditionals. The Principle of Least Surprise is not just violated by this construct, it is gang-raped.
Also, this is a form of "defensive programming". Do not engage in defensive programming; engage in offensive programming instead. Read Trevor Jim's post Postel's law is not for you.
Also, do not blindly follow some advice just because someone calls it a "best practice". Who says it is a best practice?
Is it just a couple of folks out there? then by definition, they are not entitled to dress their subjective opinion with an objective title like "best practice".
Is it the majority of the industry? The majority is usually wrong. (Some might even say always wrong, watch Paul Rulkens @ TEDxMaastricht 2014)
Is it virtually everyone in the industry? Then clearly, the industry is engaging in groupthink.
Here are the reasons often cited for using Yoda conditionals, and their rebuttals:
Alleged reason #1
Statement: It will catch accidental use of the assignment operator where the equality operator was intended.
Rebuttal: Such accidental use should be impossible because your compiler or your IDE should be issuing a warning if you try to do this. If you are not receiving a warning, then you have other, much bigger problems in need of solving, i.e. using the wrong programming language, using the wrong IDE, or trying to write code without first having figured out how to enable all warnings.
Alleged reason #2
Statement: It works even if the variable accidentally happens to be null.
Rebuttal: No, it does not work; it silently fails. If you follow offensive programming, the definition of "it works" is that it must produce correct results when given valid input, and it must deliberately fail when given invalid input. So, there are two possibilities: either the variable may legitimately be null, or it may not.
if the variable may legitimately be null, then explicitly check against null.
if the variable may not be null, then write the code so that it will not fail to fail in the event that the variable is in fact null.
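To make those last two bullets concrete, here is a minimal Java sketch; the EXPECTED constant, the check method and the printed messages are illustrative only, not taken from the question:
public class NullCheckDemo {
    private static final String EXPECTED = "OK";

    static void check(String response) {
        // null is a legitimate value: make the null check explicit instead of hiding it in a Yoda conditional
        if (response != null && response.equals(EXPECTED)) {
            System.out.println("match (null allowed)");
        }

        // null is a bug: call equals on the variable so a null fails fast with a NullPointerException
        if (response.equals(EXPECTED)) {
            System.out.println("match (null forbidden)");
        }
    }

    public static void main(String[] args) {
        check("OK");
    }
}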
| Compare constant equals object or object equal constant | Follow Java best-java-coding-practices.htm, they say we need call .equals on known string constants rather than UNKNOWN variable
String string = new Test().getString();
// always compare like this, this will never throw NPE
System.out.println("CONSTANT.equals(string):"+CONSTANT.equals(string));
System.out.println("Comparision like string.equals(CONSTANT) may throw NullPointerException");
// next statement will throw NPE
System.out.println("string.equals(CONSTANT):"+string.equals(CONSTANT));
So how about KNOWN variable? Should we still use this way or not?
For example, if I receive an object from server and server notify that this object never null.
In case I want to compare this object with a constant
// CONS: it may return NPE if server return null (for example server do wrong) => app crash
// PRO: when we read this code, we have a mindset that object never null, if it null it is the server bug
object.equals(CONSTANT)
// CONS: When we read this code, we never know why and when object == null so it confusing.
// It not return NPE so code still running and we may have some problem with UI or logic
// PRO: it never return NPE
CONSTANT.equals(object)
Any suggestions would be much appreciated. For me, I prefer object.equals(CONSTANT) for known variable but my team not.
UPDATE I think
CONSTANT.equals(object)
similar too
try{
object.equals(CONSTANT)
catch(NullPointerException ex){
// don't handle or explain anything
}
| [
"The practice of reversing the terms around the equality operator when one of the terms is a constant is called a Yoda conditional. You might encounter it in the following forms:\n\nif( constant == variable ) instead of if ( variable == constant )\nif( constant.equals( variable ) ) instead of if( variable.equals( constant ) )\n\nDo not use Yoda conditionals. The Principle of Least Surprise is not just violated by this construct, it is gang-raped.\nAlso, this is a form of \"defensive programming\". Do not engage in defensive programming; engage in offensive programming instead. Read Trevor Jim's post Postel's law is not for you.\nAlso, do not blindly follow some advice just because someone calls it a \"best practice\". Who says it is a best practice?\n\nIs it just a couple of folks out there? then by definition, they are not entitled to dress their subjective opinion with an objective title like \"best practice\".\nIs it the majority of the industry? The majority is usually wrong. (Some might even say always wrong, watch Paul Rulkens @ TEDxMaastricht 2014)\nIs it virtually everyone in the industry? Then clearly, the industry is engaging in groupthink.\n\nHere are the reasons often cited for using Yoda conditionals, and their rebuttals:\n\nAlleged reason #1\n\nStatement: It will catch accidental use of the assignment operator where the equality operator was intended.\nRebuttal: Such accidental use should be impossible because your compiler or your IDE should be issuing a warning if you try to do this. If you are not receiving a warning, then you have other, much bigger problems in need of solving, i.e. using the wrong programming language, using the wrong IDE, or trying to write code without first having figured out how to enable all warnings.\n\n\nAlleged reason #2\n\nStatement: It works even if the variable accidentally happens to be null.\nRebuttal: No, it does not work; it silently fails. If you follow offensive programming, the definition of \"it works\" is that it must produce correct results when given valid input, and it must deliberately fail when given invalid input. So, there are two possibilities: either the variable may legitimately be null, or it may not.\n\nif the variable may legitimately be null, then explicitly check against null.\nif the variable may not be null, then write the code so that it will not fail to fail in the event that the variable is in fact null.\n\n\n\n\n\n"
] | [
0
] | [] | [] | [
"coding_style",
"java"
] | stackoverflow_0045211817_coding_style_java.txt |
Q:
How to get first visible row after filter in vba? (worked before but not anymore)
I have this code that filters a range and then gets the first visible line's row at its beginning.
I used to do it in this very code before, and it worked just fine. I had looked up different methods (currentregion, specialcells etc) and decided to go with specialcells.
But then I changed my code just a little and had to do it twice: the same original set of data gets filtered and its first visible row retrieved, and then I do it again with another file. Even though both the data and all code related to my first set remained unchanged, it keeps throwing 'error 1004 - application-defined or object-defined error' on a = wsf.AutoFilter.Range.Offset(1, 0).SpecialCells(xlCellTypeVisible)(1).Row ('a' just being a variable to store the row number).
Again, 'wsf' is defined from the same file with the same data at the same moment in the code, and nothing else related to it changed. I merely moved up the declaration of 'wsa', which shouldn't interfere in any way.
I looked everywhere for what could be wrong but couldn't find anything, and tried every other method I could find. At some point it stopped throwing an error, but the row returned was always 1, without fail, despite changing either the offset row value or the SpecialCells row value... instead of 48k as it should be in my tests.
What's more, its twin 3 lines below works alright. Both files are xlsm, both sheets are correctly set and filtered.
I'm very confused and tired about this.
Sorry I only ask you guys stupid stuff like that, but I can't stand it anymore.
Dim deb As Date: deb = Now()
Dim owa As Workbook: Set owa = ThisWorkbook
Dim ows As Worksheet: Set ows = owa.Worksheets("Feuil1")
Dim PremAg As Range: Set PremAg = ows.Range("w2")
Dim LastAg As Range: Set LastAg = ows.Range("w" & ows.Cells(Rows.Count, "w").End(xlUp).Row)
Dim RngAg As Range: Set RngAg = ows.Range(PremAg, LastAg) 'full list of criterias I'll apply
Dim CellClient As Range
Dim CellFact As Range
Dim Agence As String
Dim wba As Workbook 'file with clients' info
Dim wsa As Worksheet 'relevant sheet
Dim RngClient As Range 'full list of clients' IDs according to client file
Dim RngFact As Range 'same but for invoices file
Dim Poubelle As Range 'Trash that stocks used up invoices to then delete them once I used them
Dim n As Integer
n = 0
Dim wbf As Workbook 'invoices file
Dim wsf As Worksheet 'relevant sheet for invoices
Dim a As Integer
Dim b As Integer
Dim c As Integer
Dim d As Integer
Set wbf = Workbooks.Open("C:\Users\QNS691\OneDrive\Documents\Excel\par agence 5\facts torturées2.xlsm")
Set wsf = wbf.Worksheets(1)
Set wba = Workbooks.Open("C:\Users\QNS691\OneDrive\Documents\Excel\par agence 5\full.xlsm")
Set wsa = wba.Worksheets(1)
Application.DisplayAlerts = False
For Each CellAg In RngAg
wsf.Range("A1").AutoFilter field:=7, Criteria1:=CStr(CellAg) 'filter works well
a = wsf.AutoFilter.Range.Offset(1, 0).SpecialCells(xlCellTypeVisible)(1).Row 'and then there's this little thing that worked just fine but then threw a fit
b = wsf.Range("g1").End(xlDown).Row
wsa.Range("A1").AutoFilter field:=7, Criteria1:=CellAg
c = wsa.AutoFilter.Range.Offset(1, 0).SpecialCells(xlCellTypeVisible)(1).Row 'while that one works ok...
d = wsa.Range("g1").End(xlDown).Row
Set RngFact = wsf.Range("g" & a, "g" & b)
Set RngClient = wsa.Range("g" & c, "g" & d)
For Each CellClient In RngClient
n = 0
ag = wsa.Cells(CellClient.Row, 7)
For Each CellFact In RngFact
If CellClient = CellFact And ag = wsf.Cells(CellFact.Row, 7) Then
n = n + 1
If Poubelle Is Nothing Then
Set Poubelle = CellFact
Else
Set Poubelle = Union(Poubelle, CellFact)
End If
End If
Next CellFact
If Not Poubelle Is Nothing Then
'Debug.Print Poubelle.Address
Poubelle.EntireRow.Delete
End If
Set Poubelle = Nothing
If n > 1 Then
wsa.Cells(CellClient.Row, 9) = n
End If
Next CellClient
wba.Save
wbf.Save
Next CellAg
Application.DisplayAlerts = True
MsgBox y & Chr(10) & deb & " " & Now()
End Sub
A:
I can't reproduce the error you are getting, even with a number of scenarios that I know can cause problems with filtered ranges. For instance, wsf.AutoFilter.Range.Offset(1, 0) will include the visible empty row below the filtered range. Also, b = wsf.Range("g1").End(xlDown).Row can move to the end of the sheet (row 1048576) if no data match the criteria.
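As a side note, here is a minimal sketch of one way to read the first visible data row directly, without SpecialCells; it assumes the filtered data starts in row 2 of wsf with values in column G, as in the question:
Dim firstVisible As Long, lastRow As Long, i As Long
lastRow = wsf.Cells(wsf.Rows.Count, "G").End(xlUp).Row
firstVisible = 0
For i = 2 To lastRow                      ' row 1 holds the headers
    If Not wsf.Rows(i).Hidden Then        ' skip rows hidden by the AutoFilter
        firstVisible = i
        Exit For
    End If
Next i
If firstVisible = 0 Then Debug.Print "No rows match the current filter"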
A different solution without using filters would be to use a dictionary object. Note I have disabled the row delete line and replaced it with a colour marker for testing purposes. For example:
Option Explicit
Sub demo()
Const FOLDER = "C:\Users\QNS691\OneDrive\Documents\Excel\par agence 5\"
Dim owa As Workbook, ows As Worksheet, dictCrit
Dim wbf As Workbook, wsf As Worksheet
Dim wba As Workbook, wsa As Worksheet
Dim i As Long, r As Long, n As Long, k, t0 As Single
t0 = Timer
Set owa = ThisWorkbook
Set ows = owa.Sheets("Feuil1")
' dictionary with criteria as key, counts as values
Set dictCrit = CreateObject("Scripting.Dictionary")
' criteria to apply
With ows
r = .Cells(.Rows.Count, "W").End(xlUp).Row
If r < 2 Then
MsgBox "No criteria in col W on sheet " & ows.Name, vbCritical
Exit Sub
End If
'build dictionary
For i = 2 To r
k = Trim(.Cells(i, "W"))
If dictCrit.exists(k) Then
MsgBox "Duplicate criteria: " & k, vbCritical, ows.Name & " Col W Row " & i
Exit Sub
Else
dictCrit.Add k, 0
End If
Next
End With
Set wba = Workbooks.Open(FOLDER & "full.xlsm")
Set wsa = wba.Worksheets(1)
With wsa
r = .Cells(.Rows.Count, "G").End(xlUp).Row
If r < 2 Then
MsgBox "No data in col G on sheet " & wsa.Name, vbCritical
Exit Sub
End If
End With
Set wbf = Workbooks.Open(FOLDER & "facts torturées2.xlsm")
Set wsf = wbf.Worksheets(1)
Dim colG As String ' col 7
With wsf
r = .Cells(.Rows.Count, "G").End(xlUp).Row
If r < 2 Then
MsgBox "No data in col G on sheet " & wsf.Name, vbCritical
Exit Sub
End If
' scan up facts sheet counting record and deleting matches
Application.ScreenUpdating = False
For i = r To 2 Step -1
colG = Trim(.Cells(i, "G"))
If dictCrit.exists(colG) Then
dictCrit(colG) = dictCrit(colG) + 1
.Cells(i, "G").Interior.Color = vbRed
'.Rows(i).Delete
n = n + 1
End If
Next
Application.ScreenUpdating = True
End With
' delete zero counts
For Each k In dictCrit.keys
If dictCrit(k) = 0 Then dictCrit.Remove k
Next
' update full.xlsm with counts
With wsa
r = .Cells(.Rows.Count, "G").End(xlUp).Row
' scan down and update counts
Application.ScreenUpdating = False
For i = 2 To r
colG = Trim(.Cells(i, "G"))
If dictCrit.exists(colG) Then
.Cells(i, "I") = dictCrit(colG) ' col 9
dictCrit.Remove colG
End If
Next
Application.ScreenUpdating = True
End With
'wba.Save
'wbf.Save
' check all updated
If dictCrit.Count > 0 Then
MsgBox "Counts not updated for" & vbLf & Join(dictCrit.keys, vbLf), vbCritical
Else
MsgBox n & " rows deleted", vbInformation, "Run time = " & Format(Timer - t0, "0.0 secs")
End If
End Sub
| How to get first visible row after filter in vba? (worked before but not anymore) | I have this code that filters a range and then gets the first visible line's row at its beginning.
I used to do it in this very code before, and it worked just fine. I had looked up different methods (currentregion, specialcells etc) and decided to go with specialcells.
But then I changed my code just a little, and had to do it twice -same original set of data gets filtered and first rowed, and then I do it again with another file. But even though both the data and all code related to my first set remained unchanged, it keeps throwing 'error 1004 - error defined by application or object' on a = wsf.AutoFilter.Range.Offset(1, 0).SpecialCells(xlCellTypeVisible)(1).Row ('a' just being a variable to stock the row number).
Again, 'wsf' is defined from the same file with the same data at the same moment in the code and nothing else related to it changed. I merely moved up 'wsa' 's declaration, which shouldn't interfere in any way.
I looked up everywhere for what could be wrong but couldn't find anything. Tried any other method I could find. At some point it stopped throwing an error but the row returned was always, without fault, 1, despite changing either the offset row value or the specialcells row value... instead of 48k as it should in my tests.
What's more, its twin 3 lines below works alright. Both files are xlsm, both sheets are correctly set and filtered.
I'm very confused and tired bout this.
Sorry I only ask you guys stupid stuff like that but I can't stand it anymore.
Dim deb As Date: deb = Now()
Dim owa As Workbook: Set owa = ThisWorkbook
Dim ows As Worksheet: Set ows = owa.Worksheets("Feuil1")
Dim PremAg As Range: Set PremAg = ows.Range("w2")
Dim LastAg As Range: Set LastAg = ows.Range("w" & ows.Cells(Rows.Count, "w").End(xlUp).Row)
Dim RngAg As Range: Set RngAg = ows.Range(PremAg, LastAg) 'full list of criterias I'll apply
Dim CellClient As Range
Dim CellFact As Range
Dim Agence As String
Dim wba As Workbook 'file with clients' info
Dim wsa As Worksheet 'relevant sheet
Dim RngClient As Range 'full list of clients' IDs according to client file
Dim RngFact As Range 'same but for invoices file
Dim Poubelle As Range 'Trash that stocks used up invoices to then delete them once I used them
Dim n As Integer
n = 0
Dim wbf As Workbook 'invoices file
Dim wsf As Worksheet 'relevant sheet for invoices
Dim a As Integer
Dim b As Integer
Dim c As Integer
Dim d As Integer
Set wbf = Workbooks.Open("C:\Users\QNS691\OneDrive\Documents\Excel\par agence 5\facts torturées2.xlsm")
Set wsf = wbf.Worksheets(1)
Set wba = Workbooks.Open("C:\Users\QNS691\OneDrive\Documents\Excel\par agence 5\full.xlsm")
Set wsa = wba.Worksheets(1)
Application.DisplayAlerts = False
For Each CellAg In RngAg
wsf.Range("A1").AutoFilter field:=7, Criteria1:=CStr(CellAg) 'filter works well
a = wsf.AutoFilter.Range.Offset(1, 0).SpecialCells(xlCellTypeVisible)(1).Row 'and then there's this little thing that worked just fine but then threw a fit
b = wsf.Range("g1").End(xlDown).Row
wsa.Range("A1").AutoFilter field:=7, Criteria1:=CellAg
c = wsa.AutoFilter.Range.Offset(1, 0).SpecialCells(xlCellTypeVisible)(1).Row 'while that one works ok...
d = wsa.Range("g1").End(xlDown).Row
Set RngFact = wsf.Range("g" & a, "g" & b)
Set RngClient = wsa.Range("g" & c, "g" & d)
For Each CellClient In RngClient
n = 0
ag = wsa.Cells(CellClient.Row, 7)
For Each CellFact In RngFact
If CellClient = CellFact And ag = wsf.Cells(CellFact.Row, 7) Then
n = n + 1
If Poubelle Is Nothing Then
Set Poubelle = CellFact
Else
Set Poubelle = Union(Poubelle, CellFact)
End If
End If
Next CellFact
If Not Poubelle Is Nothing Then
'Debug.Print Poubelle.Address
Poubelle.EntireRow.Delete
End If
Set Poubelle = Nothing
If n > 1 Then
wsa.Cells(CellClient.Row, 9) = n
End If
Next CellClient
wba.Save
wbf.Save
Next CellAg
Application.DisplayAlerts = True
MsgBox y & Chr(10) & deb & " " & Now()
End Sub
| [
"I can't reproduce the error you are getting with a number of scenarios I know can cause problems with filtered ranges. For instance, wsf.AutoFilter.Range.Offset(1, 0) will include the visible empty row below the filtered range. Also b = wsf.Range(\"g1\").End(xlDown).Row can move to the end of the sheet (row 1048576) if no data match the criteria.\nA different solution without using filters would be to use a dictionary object. Note I have disabled the row delete line and replaced it with a colour marker for testing purposes. For example ;\nOption Explicit\n\nSub demo()\n\n Const FOLDER = \"C:\\Users\\QNS691\\OneDrive\\Documents\\Excel\\par agence 5\\\"\n\n Dim owa As Workbook, ows As Worksheet, dictCrit\n Dim wbf As Workbook, wsf As Worksheet\n Dim wba As Workbook, wsa As Worksheet\n Dim i As Long, r As Long, n As Long, k, t0 As Single\n t0 = Timer\n \n Set owa = ThisWorkbook\n Set ows = owa.Sheets(\"Feuil1\")\n \n ' dictionary with criteria as key, counts as values\n Set dictCrit = CreateObject(\"Scripting.Dictionary\")\n \n ' criteria to apply\n With ows\n r = .Cells(.Rows.Count, \"W\").End(xlUp).Row\n If r < 2 Then\n MsgBox \"No criteria in col W on sheet \" & ows.Name, vbCritical\n Exit Sub\n End If\n 'build dictionary\n For i = 2 To r\n k = Trim(.Cells(i, \"W\"))\n If dictCrit.exists(k) Then\n MsgBox \"Duplicate criteria: \" & k, vbCritical, ows.Name & \" Col W Row \" & i\n Exit Sub\n Else\n dictCrit.Add k, 0\n End If\n Next\n End With\n \n Set wba = Workbooks.Open(FOLDER & \"full.xlsm\")\n Set wsa = wba.Worksheets(1)\n With wsa\n r = .Cells(.Rows.Count, \"G\").End(xlUp).Row\n If r < 2 Then\n MsgBox \"No data in col G on sheet \" & wsa.Name, vbCritical\n Exit Sub\n End If\n End With\n \n Set wbf = Workbooks.Open(FOLDER & \"facts torturées2.xlsm\")\n Set wsf = wbf.Worksheets(1)\n Dim colG As String ' col 7\n \n With wsf\n r = .Cells(.Rows.Count, \"G\").End(xlUp).Row\n If r < 2 Then\n MsgBox \"No data in col G on sheet \" & wsf.Name, vbCritical\n Exit Sub\n End If\n \n ' scan up facts sheet counting record and deleting matches\n Application.ScreenUpdating = False\n For i = r To 2 Step -1\n colG = Trim(.Cells(i, \"G\"))\n If dictCrit.exists(colG) Then\n dictCrit(colG) = dictCrit(colG) + 1\n .Cells(i, \"G\").Interior.Color = vbRed\n '.Rows(i).Delete\n n = n + 1\n End If\n Next\n Application.ScreenUpdating = True\n End With\n \n ' delete zero counts\n For Each k In dictCrit.keys\n If dictCrit(k) = 0 Then dictCrit.Remove k\n Next\n \n ' update full.xlsm with counts\n With wsa\n r = .Cells(.Rows.Count, \"G\").End(xlUp).Row\n ' scan down and update counts\n Application.ScreenUpdating = False\n For i = 2 To r\n colG = Trim(.Cells(i, \"G\"))\n If dictCrit.exists(colG) Then\n .Cells(i, \"I\") = dictCrit(colG) ' col 9\n dictCrit.Remove colG\n End If\n Next\n Application.ScreenUpdating = True\n End With\n 'wba.Save\n 'wbf.Save\n \n ' check all updated\n If dictCrit.Count > 0 Then\n MsgBox \"Counts not updated for\" & vbLf & Join(dictCrit.keys, vbLf), vbCritical\n Else\n MsgBox n & \" rows deleted\", vbInformation, \"Run time = \" & Format(Timer - t0, \"0.0 secs\")\n End If\nEnd Sub\n\n"
] | [
0
] | [] | [] | [
"excel",
"vba"
] | stackoverflow_0074657436_excel_vba.txt |
Q:
parse list containing html-like elements into nested json using Python
I'm not the best at converting certain sections of a list to nested JSON and was hoping for some guidance. I have a list containing data like below:
['<h5> 1|',
'<h6>Type of Care|',
'<h6>SA|Substance use treatment|',
'<h6>DT|Detoxification |',
'<h6>HH|Transitional housing, halfway house, or sober home|',
'<h6>SUMH|Treatment for co-occurring serious mental health | illness/serious emotional disturbance and substance | use disorders|',
'',
'<h5> 2|',
'<h6>Telemedicine|',
'<h6>TELE|TelemedicineTelemedicine/telehealth|',
'']
I want to first remove all records in the list that have no content, then convert the records that contain a tag like "<h5>" into the key and group the records that contain "<h6>" into values, like this JSON output:
"codekey": [
{
"category": [
{
"key": 1,
"value": "Type of Care"
}
],
"codes": [
{
"key": "SA",
"value": "Substance use treatment"
},
{
"key": "DT",
"value": "Detoxification"
},
{
"key": "HH",
"value": "Transitional housing, halfway house, or sober home"
},
{
"key": "SUMH",
"value": "Treatment for co-occurring serious mental health | illness/serious emotional disturbance and substance | use disorders|"
}
]
},
{
"category": [
{
"key": 2,
"value": "Telemedicine"
}
],
"codes": [
{
"key": "TELE",
"value": "TelemedicineTelemedicine/telehealth"
}
]
}
]
I think I need to perform a loop but I'm getting stuck on how to create the 'key/value' relationship. I think I also need to use a regex but I'm just not the best at Python to conceptually convert the data to the required output. Any advice on training I could look up to do this OR any preliminary suggestions on how to get started? Thank you!
A:
Assuming your format remains constant, here is a flexible solution that is configurable:
class Separator():
def __init__(self, data, title, sep, splitter):
self.data = data # the data
self.title = title # the starting in your case "<h5>"
self.sep = sep # the point where you want to update res
self.splitter = splitter # the separator between key | value
self.res = [] # final res
self.tempDict = {} # tempDict to append
def clearString(self, string, *args):
for arg in args:
string = string.replace(arg, '') # replace every arg to ''
return string.strip()
def updateDict(self, val):
if val == self.sep:
self.res.append(self.tempDict) # update res
self.tempDict = {} # renew tempDict to append
else:
try:
if self.title in val: # check if it "<h5>" in your case
self.tempDict["category"] = [{"key": self.clearString(val, self.title, self.splitter), "value": self.clearString(self.data[self.data.index(val)+1],'<h6>', '|')}] # get the next value
elif self.tempDict["category"][0]["value"] != self.clearString(val, '<h6>', '|'): # check if it is not the "value" of h6 in "category"
val = self.clearString(val,"<h6>").split("|")
if "codes" not in self.tempDict.keys(): self.tempDict["codes"] = [] # create key if not there
self.tempDict["codes"].append({"key": val[0], "value": val[1]})
except: # avoid Exceptions
pass
return self.res
object = Separator(data, '<h5>', '', '|')
for val in data:
res = object.updateDict(val)
print(res)
Output for your Sample Input Provided:
[
{
'category': [{'key': '1', 'value': 'Type of Care'}],
'codes': [
{'key': 'SA', 'value': 'Substance use treatment'},
{'key': 'DT', 'value': 'Detoxification '},
{
'key': 'HH',
'value': 'Transitional housing, halfway house, or sober home',
},
{
'key': 'SUMH',
'value': 'Treatment for co-occurring serious mental health ',
},
],
},
{
'category': [{'key': '2', 'value': 'Telemedicine'}],
'codes': [
{'key': 'TELE', 'value': 'TelemedicineTelemedicine/telehealth'},
],
},
]
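If the exact JSON shape from the question is needed (a top-level "codekey" key), the result list can be wrapped and serialized with the standard json module; this assumes res is the list produced by the loop above:
import json
print(json.dumps({"codekey": res}, indent=4))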
| parse list containing html-like elements into nested json using Python | I'm not the best at converting certain sections of a list to nested Json and was hoping for some guidance. I have a list containing data like below:
['<h5> 1|',
'<h6>Type of Care|',
'<h6>SA|Substance use treatment|',
'<h6>DT|Detoxification |',
'<h6>HH|Transitional housing, halfway house, or sober home|',
'<h6>SUMH|Treatment for co-occurring serious mental health | illness/serious emotional disturbance and substance | use disorders|',
'',
'<h5> 2|',
'<h6>Telemedicine|',
'<h6>TELE|TelemedicineTelemedicine/telehealth|',
'']
I want to first remove all records in the list that have no content, then I want to convert the records that contain a tag like "<H5>" into the key and group the records that contain "<h6>" into values like this json output:
"codekey": [
{
"category": [
{
"key": 1,
"value": "Type of Care"
}
],
"codes": [
{
"key": "SA",
"value": "Substance use treatment"
},
{
"key": "DT",
"value": "Detoxification"
},
{
"key": "HH",
"value": "Transitional housing, halfway house, or sober home"
},
{
"key": "SUMH",
"value": "Treatment for co-occurring serious mental health | illness/serious emotional disturbance and substance | use disorders|"
}
]
},
{
"category": [
{
"key": 2,
"value": "Telemedicine"
}
],
"codes": [
{
"key": "TELE",
"value": "TelemedicineTelemedicine/telehealth"
}
]
}
]
I think I need to perform a loop but I'm getting stuck on how to create the 'key/value' relationship. I think I also need to use a regex but I'm just not the best at Python to conceptually convert the data to the required output. Any advice on training I could look up to do this OR any preliminary suggestions on how to get started? Thank you!
| [
"Considering your format remains constant. Here's a flexible solution that is configurable:\nclass Separator():\n def __init__(self, data, title, sep, splitter):\n self.data = data # the data\n self.title = title # the starting in your case \"<h5>\"\n self.sep = sep # the point where you want to update res\n self.splitter = splitter # the separator between key | value\n self.res = [] # final res\n self.tempDict = {} # tempDict to append\n def clearString(self, string, *args):\n for arg in args:\n string = string.replace(arg, '') # replace every arg to ''\n return string.strip()\n def updateDict(self, val):\n if val == self.sep:\n self.res.append(self.tempDict) # update res\n self.tempDict = {} # renew tempDict to append\n else:\n try:\n if self.title in val: # check if it \"<h5>\" in your case\n self.tempDict[\"category\"] = [{\"key\": self.clearString(val, self.title, self.splitter), \"value\": self.clearString(self.data[self.data.index(val)+1],'<h6>', '|')}] # get the next value\n elif self.tempDict[\"category\"][0][\"value\"] != self.clearString(val, '<h6>', '|'): # check if it is not the \"value\" of h6 in \"category\"\n val = self.clearString(val,\"<h6>\").split(\"|\")\n if \"codes\" not in self.tempDict.keys(): self.tempDict[\"codes\"] = [] # create key if not there\n self.tempDict[\"codes\"].append({\"key\": val[0], \"value\": val[1]})\n except: # avoid Exceptions\n pass\n return self.res\nobject = Separator(data, '<h5>', '', '|')\nfor val in data:\n res = object.updateDict(val)\nprint(res)\n\nOutput for your Sample Input Provided:\n[\n {\n 'category': [{'key': '1', 'value': 'Type of Care'}],\n 'codes': [\n {'key': 'SA', 'value': 'Substance use treatment'},\n {'key': 'DT', 'value': 'Detoxification '},\n {\n 'key': 'HH',\n 'value': 'Transitional housing, halfway house, or sober home',\n },\n {\n 'key': 'SUMH',\n 'value': 'Treatment for co-occurring serious mental health ',\n },\n ],\n },\n {\n 'category': [{'key': '2', 'value': 'Telemedicine'}],\n 'codes': [\n {'key': 'TELE', 'value': 'TelemedicineTelemedicine/telehealth'},\n ],\n },\n]\n\n"
] | [
0
] | [] | [] | [
"json",
"python"
] | stackoverflow_0074661204_json_python.txt |
Q:
Ask only for Mic permission Opentok
I'm using OpenTok RTC, and I need to ask for permission for the mic only.
I have been reading the documentation and I'm using getDevices, which asks for camera and mic permissions, but I only need the mic permission.
I can't find a way around this yet
Link for getDevices here
A:
OpenTok RTC provides two methods for granting permissions for microphone access. The first is using the JavaScript API when you create the session. You can set the media mode to "audio-only" which will only grant access to the microphone. The second is using the Android/iOS SDK. When the session is created, you can set the AudioTrackConstraints to "AudioTrackConstraints.audioOnly()" which will also only grant access to the microphone.
A:
For anyone who might encounter this, set videoSource to false when initializing a publisher.
link for reference https://tokbox.com/developer/sdks/js/reference/OT.html#initPublisher
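A minimal sketch of that approach; the target element id and the error handling are placeholders, not from the question:
// audio-only publisher: the browser only asks for microphone permission
var publisher = OT.initPublisher('publisher', { videoSource: false }, function (error) {
    if (error) console.error(error);
});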
| Ask only for Mic permission Opentok | I'm using OpenTok RTC. And I need to only grant permission for Mic only.
I have been reading the documentations and I'm using the getDevices which asks for camera and mic permissions. And I only need Mic permission
I can't find a way around this yet
Link for getDevices here
| [
"OpenTok RTC provides two methods for granting permissions for microphone access. The first is using the JavaScript API when you create the session. You can set the media mode to \"audio-only\" which will only grant access to the microphone. The second is using the Android/iOS SDK. When the session is created, you can set the AudioTrackConstraints to \"AudioTrackConstraints.audioOnly()\" which will also only grant access to the microphone.\n",
"For anyone who might encounter this, set videoSource to false when initializing a publisher.\nlink for reference https://tokbox.com/developer/sdks/js/reference/OT.html#initPublisher\n"
] | [
0,
0
] | [] | [] | [
"javascript",
"opentok",
"permissions",
"rational_team_concert",
"reactjs"
] | stackoverflow_0074674974_javascript_opentok_permissions_rational_team_concert_reactjs.txt |
Q:
How to detect if the value entered in HTML CSS input is a number
I wrote a code like this:
Age: <input type="text" id="age">
And JavaScript:
<script>
function validateForm() {
let age = document.getElementById("age").value;
if (age) // is number?
}
</script>
I want the input to give an error if the value entered is a letter; I will only accept numbers. How can I tell if the entered value is not a number?
A:
You can use isNaN() to do it
function validateData() {
let age = document.getElementById("age").value;
if(isNaN(age)){
console.log(`${age} is not a number`)
}else{
console.log(`${age} is a number`)
}
}
Age: <input type="text" id="age" onchange ="validateData()">
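One caveat with isNaN on its own: isNaN('') and isNaN('   ') both return false, because empty or whitespace-only strings coerce to 0, so empty input would pass as a number. A small sketch that also rejects empty input:
function isNumeric(value) {
    return value.trim() !== '' && !isNaN(value);
}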
A:
Without using/depending on CSS you can apply the style to HTML elements using plain JavaScript as follows.
function validateForm() {
let age = document.getElementById("age").value;
if (age) // is number?
if (isNaN(age)) {
document.getElementById("age").style.borderColor="red";
}
}
Age: <input type="text" onchange ="validateForm()" id="age">
| How to detect if the value entered in HTML CSS input is a number | I wrote a code like this:
Age: <input type="text" id="age">
And JavaScript:
<script>
function validateForm() {
let age = document.getElementById("age").value;
if (age) // is number?
}
</script>
I want the input to give an error if the value entered is a letter, I will only accept numbers. How can I tell if the entered value is not a number?
| [
"You can use isNaN() to do it\n\n\nfunction validateData() {\n let age = document.getElementById(\"age\").value;\n if(isNaN(age)){\n console.log(`${age} is not a number`)\n }else{\n console.log(`${age} is a number`)\n } \n}\nAge: <input type=\"text\" id=\"age\" onchange =\"validateData()\">\n\n\n\n",
"Without using/depending on CSS you can apply the style to HTML elements using plain JavaScript as follows.\n\n\n function validateForm() {\n let age = document.getElementById(\"age\").value;\n if (age) // is number?\n if (isNaN(age)) {\n document.getElementById(\"age\").style.borderColor=\"red\";\n }\n }\n Age: <input type=\"text\" onchange =\"validateForm()\" id=\"age\">\n\n\n\n"
] | [
0,
0
] | [] | [] | [
"html",
"javascript"
] | stackoverflow_0074675152_html_javascript.txt |
Q:
My QT connect works from parent to child, but not vice versa
I am making a Qt application. I have a MainWindow1 class and a FirstTab class. The MainWindow1 class is a QMainWindow, and inside it I created this signal ->
mainwindow1.h
signals:
void loadDateSig(QString strDate);
And in FirstTab I created this slot->
FirstTab.h
public slots:
void loadDate(QString date);
Q_OBJECT is defined in both MainWindow1 and FirstTab. Now in my MainWindow1 constructor, this is how I start ->
mainwindow1.cpp
MainWindow1::MainWindow1()
: mainWidget(new QTabWidget) {
setCentralWidget(mainWidget);
mainWidget->addTab(new FirstTab(mainWidget), "First");
createActions();
createStatusBar();
And below that, I start making some connections. I connect a few items to change the status that the document was modified. You can see them here, and these work with no issues ->
connect(mainWidget->findChild<QTimeEdit *>("timeEdit"), &QTimeEdit::editingFinished,
this, &MainWindow1::documentWasModified);
connect(mainWidget->findChild<QDateEdit *>("dateEdit"), &QTimeEdit::editingFinished,
this, &MainWindow1::documentWasModified);
connect(mainWidget->findChild<QLineEdit *>("shooterEdit"), &QLineEdit::textEdited,
this, &MainWindow1::documentWasModified);
connect(mainWidget->findChild<QLineEdit *>("recorderEdit"), &QLineEdit::textEdited,
this, &MainWindow1::documentWasModified);
connect(mainWidget->findChild<QDoubleSpinBox *>("tempCSpin"), &QDoubleSpinBox::editingFinished,
this, &MainWindow1::documentWasModified);
connect(mainWidget->findChild<QDoubleSpinBox *>("tempFSpin"), &QDoubleSpinBox::editingFinished,
this, &MainWindow1::documentWasModified);
Now I try to connect my loadDateSig and loadDate. I have tried creating the connect both in the child and in the parent class, but both give me an error. I tried to connect similarly to how I connected previously ->
connect(this, &MainWindow1::loadDateSig, mainWidget->findChild<QWidget *>("tab1"), &FirstTab::loadDate);
However, this doesn't seem to work. I get these errors ->
====================[ Build | all | Debug ]=====================================
"E:\Program Files (x86)\CLion 2022.2.1\bin\cmake\win\bin\cmake.exe" --build E:\Projects\Galvion\cmake-build-debug --target all -j 9
[1/4] Automatic MOC and UIC for target Galvion
[2/3] Building CXX object CMakeFiles/Galvion.dir/mainwindow1.cpp.obj
FAILED: CMakeFiles/Galvion.dir/mainwindow1.cpp.obj
E:\PROGRA~1\CLION2~1.1\bin\mingw\bin\G__~1.EXE -DMINGW_HAS_SECURE_API=1 -DQT_CORE_LIB -DQT_GUI_LIB -DQT_WIDGETS_LIB -DUNICODE -DWIN32 -DWIN64 -D_ENABLE_EXTENDED_ALIGNED_STORAGE -D_UNICODE -D_WIN64 -IE:/Projects/Galvion/cmake-build-debug -IE:/Projects/Galvion -IE:/Projects/Galvion/cmake-build-debug/Galvion_autogen/include -isystem C:/Qt/6.4.1/mingw_64/include/QtCore -isystem C:/Qt/6.4.1/mingw_64/include -isystem C:/Qt/6.4.1/mingw_64/mkspecs/win32-g++ -isystem C:/Qt/6.4.1/mingw_64/include/QtGui -isystem C:/Qt/6.4.1/mingw_64/include/QtWidgets -g -MD -MT CMakeFiles/Galvion.dir/mainwindow1.cpp.obj -MF CMakeFiles\Galvion.dir\mainwindow1.cpp.obj.d -o CMakeFiles/Galvion.dir/mainwindow1.cpp.obj -c E:/Projects/Galvion/mainwindow1.cpp
E:/Projects/Galvion/mainwindow1.cpp: In constructor 'MainWindow1::MainWindow1()':
E:/Projects/Galvion/mainwindow1.cpp:26:12: error: no matching function for call to 'MainWindow1::connect(MainWindow1*, void (MainWindow1::*)(QString), QWidget*, void (FirstTab::*)(QString))'
26 | connect(this, &MainWindow1::loadDateSig, mainWidget->findChild<QWidget *>("tab1"), &FirstTab::loadDate);
| ~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In file included from C:/Qt/6.4.1/mingw_64/include/QtCore/qabstractanimation.h:7,
from C:/Qt/6.4.1/mingw_64/include/QtCore/QtCore:10,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QtWidgetsDepends:3,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QTWidgets:3,
from E:/Projects/Galvion/mainwindow1.h:7,
from E:/Projects/Galvion/mainwindow1.cpp:1:
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:201:43: note: candidate: 'static QMetaObject::Connection QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, const typename QtPrivate::FunctionPointer<Func2>::Object*, Func2, Qt::ConnectionType) [with Func1 = void (MainWindow1::*)(QString); Func2 = void (FirstTab::*)(QString); typename QtPrivate::FunctionPointer<Func>::Object = MainWindow1; typename QtPrivate::FunctionPointer<Func2>::Object = FirstTab]' (near match)
201 | static inline QMetaObject::Connection connect(const typename QtPrivate::FunctionPointer<Func1>::Object *sender, Func1 signal,
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:201:43: note: conversion of argument 3 would be ill-formed:
E:/Projects/Galvion/mainwindow1.cpp:26:78: error: invalid conversion from 'QWidget*' to 'const Object*' {aka 'const FirstTab*'} [-fpermissive]
26 | connect(this, &MainWindow1::loadDateSig, mainWidget->findChild<QWidget *>("tab1"), &FirstTab::loadDate);
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~
| |
| QWidget*
In file included from C:/Qt/6.4.1/mingw_64/include/QtCore/qabstractanimation.h:7,
from C:/Qt/6.4.1/mingw_64/include/QtCore/QtCore:10,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QtWidgetsDepends:3,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QTWidgets:3,
from E:/Projects/Galvion/mainwindow1.h:7,
from E:/Projects/Galvion/mainwindow1.cpp:1:
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:233:13: note: candidate: 'template<class Func1, class Func2> static typename std::enable_if<((int)(QtPrivate::FunctionPointer<Func2>::ArgumentCount) >= 0), QMetaObject::Connection>::type QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, Func2)'
233 | connect(const typename QtPrivate::FunctionPointer<Func1>::Object *sender, Func1 signal, Func2 slot)
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:233:13: note: template argument deduction/substitution failed:
E:/Projects/Galvion/mainwindow1.cpp:26:12: note: candidate expects 3 arguments, 4 provided
26 | connect(this, &MainWindow1::loadDateSig, mainWidget->findChild<QWidget *>("tab1"), &FirstTab::loadDate);
| ~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In file included from C:/Qt/6.4.1/mingw_64/include/QtCore/qabstractanimation.h:7,
from C:/Qt/6.4.1/mingw_64/include/QtCore/QtCore:10,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QtWidgetsDepends:3,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QTWidgets:3,
from E:/Projects/Galvion/mainwindow1.h:7,
from E:/Projects/Galvion/mainwindow1.cpp:1:
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:242:13: note: candidate: 'template<class Func1, class Func2> static typename std::enable_if<(((int)(QtPrivate::FunctionPointer<Func2>::ArgumentCount) >= 0) && (! QtPrivate::FunctionPointer<Func2>::IsPointerToMemberFunction)), QMetaObject::Connection>::type QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, const QObject*, Func2, Qt::ConnectionType)'
242 | connect(const typename QtPrivate::FunctionPointer<Func1>::Object *sender, Func1 signal, const QObject *context, Func2 slot,
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:242:13: note: template argument deduction/substitution failed:
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h: In substitution of 'template<class Func1, class Func2> static typename std::enable_if<(((int)(QtPrivate::FunctionPointer<Func2>::ArgumentCount) >= 0) && (! QtPrivate::FunctionPointer<Func2>::IsPointerToMemberFunction)), QMetaObject::Connection>::type QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, const QObject*, Func2, Qt::ConnectionType) [with Func1 = void (MainWindow1::*)(QString); Func2 = void (FirstTab::*)(QString)]':
E:/Projects/Galvion/mainwindow1.cpp:26:12: required from here
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:242:13: error: no type named 'type' in 'struct std::enable_if<false, QMetaObject::Connection>'
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:276:13: note: candidate: 'template<class Func1, class Func2> static typename std::enable_if<((QtPrivate::FunctionPointer<Func2>::ArgumentCount == -1) && (! is_convertible_v<Func2, const char*>)), QMetaObject::Connection>::type QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, Func2)'
276 | connect(const typename QtPrivate::FunctionPointer<Func1>::Object *sender, Func1 signal, Func2 slot)
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:276:13: note: template argument deduction/substitution failed:
E:/Projects/Galvion/mainwindow1.cpp:26:12: note: candidate expects 3 arguments, 4 provided
26 | connect(this, &MainWindow1::loadDateSig, mainWidget->findChild<QWidget *>("tab1"), &FirstTab::loadDate);
| ~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In file included from C:/Qt/6.4.1/mingw_64/include/QtCore/qabstractanimation.h:7,
from C:/Qt/6.4.1/mingw_64/include/QtCore/QtCore:10,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QtWidgetsDepends:3,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QTWidgets:3,
from E:/Projects/Galvion/mainwindow1.h:7,
from E:/Projects/Galvion/mainwindow1.cpp:1:
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:287:13: note: candidate: 'template<class Func1, class Func2> static typename std::enable_if<((QtPrivate::FunctionPointer<Func2>::ArgumentCount == -1) && (! is_convertible_v<Func2, const char*>)), QMetaObject::Connection>::type QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, const QObject*, Func2, Qt::ConnectionType)'
287 | connect(const typename QtPrivate::FunctionPointer<Func1>::Object *sender, Func1 signal, const QObject *context, Func2 slot,
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:287:13: note: template argument deduction/substitution failed:
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h: In substitution of 'template<class Func1, class Func2> static typename std::enable_if<((QtPrivate::FunctionPointer<Func2>::ArgumentCount == -1) && (! is_convertible_v<Func2, const char*>)), QMetaObject::Connection>::type QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, const QObject*, Func2, Qt::ConnectionType) [with Func1 = void (MainWindow1::*)(QString); Func2 = void (FirstTab::*)(QString)]':
E:/Projects/Galvion/mainwindow1.cpp:26:12: required from here
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:287:13: error: no type named 'type' in 'struct std::enable_if<false, QMetaObject::Connection>'
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:181:36: note: candidate: 'static QMetaObject::Connection QObject::connect(const QObject*, const char*, const QObject*, const char*, Qt::ConnectionType)'
181 | static QMetaObject::Connection connect(const QObject *sender, const char *signal,
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:181:79: note: no known conversion for argument 2 from 'void (MainWindow1::*)(QString)' to 'const char*'
181 | static QMetaObject::Connection connect(const QObject *sender, const char *signal,
| ~~~~~~~~~~~~^~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:184:36: note: candidate: 'static QMetaObject::Connection QObject::connect(const QObject*, const QMetaMethod&, const QObject*, const QMetaMethod&, Qt::ConnectionType)'
184 | static QMetaObject::Connection connect(const QObject *sender, const QMetaMethod &signal,
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:184:86: note: no known conversion for argument 2 from 'void (MainWindow1::*)(QString)' to 'const QMetaMethod&'
184 | static QMetaObject::Connection connect(const QObject *sender, const QMetaMethod &signal,
| ~~~~~~~~~~~~~~~~~~~^~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:432:32: note: candidate: 'QMetaObject::Connection QObject::connect(const QObject*, const char*, const char*, Qt::ConnectionType) const'
432 | inline QMetaObject::Connection QObject::connect(const QObject *asender, const char *asignal,
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:432:85: note: no known conversion for argument 2 from 'void (MainWindow1::*)(QString)' to 'const char*'
432 | inline QMetaObject::Connection QObject::connect(const QObject *asender, const char *asignal,
| ~~~~~~~~~~~~^~~~~~~
ninja: build stopped: subcommand failed.
Could anyone help me figure out why this happens?
I tried to create the connect in the FirstTab class, changing the parent in mainwindow1.cpp from mainWidget to this so the child class can access the signal ->
mainWidget->addTab(new FirstTab(this), "First");
And then I create this connect in FirstTab.cpp ->
connect(parent, &MainWindow1::loadDateSig, this, &FirstTab::loadDate);
I get this error when trying to build ->
====================[ Build | all | Debug ]=====================================
"E:\Program Files (x86)\CLion 2022.2.1\bin\cmake\win\bin\cmake.exe" --build E:\Projects\Galvion\cmake-build-debug --target all -j 9
[1/5] Automatic MOC and UIC for target Galvion
[2/4] Building CXX object CMakeFiles/Galvion.dir/FirstTab.cpp.obj
FAILED: CMakeFiles/Galvion.dir/FirstTab.cpp.obj
E:\PROGRA~1\CLION2~1.1\bin\mingw\bin\G__~1.EXE -DMINGW_HAS_SECURE_API=1 -DQT_CORE_LIB -DQT_GUI_LIB -DQT_WIDGETS_LIB -DUNICODE -DWIN32 -DWIN64 -D_ENABLE_EXTENDED_ALIGNED_STORAGE -D_UNICODE -D_WIN64 -IE:/Projects/Galvion/cmake-build-debug -IE:/Projects/Galvion -IE:/Projects/Galvion/cmake-build-debug/Galvion_autogen/include -isystem C:/Qt/6.4.1/mingw_64/include/QtCore -isystem C:/Qt/6.4.1/mingw_64/include -isystem C:/Qt/6.4.1/mingw_64/mkspecs/win32-g++ -isystem C:/Qt/6.4.1/mingw_64/include/QtGui -isystem C:/Qt/6.4.1/mingw_64/include/QtWidgets -g -MD -MT CMakeFiles/Galvion.dir/FirstTab.cpp.obj -MF CMakeFiles\Galvion.dir\FirstTab.cpp.obj.d -o CMakeFiles/Galvion.dir/FirstTab.cpp.obj -c E:/Projects/Galvion/FirstTab.cpp
E:/Projects/Galvion/FirstTab.cpp: In constructor 'FirstTab::FirstTab(QWidget*)':
E:/Projects/Galvion/FirstTab.cpp:82:12: error: no matching function for call to 'FirstTab::connect(QWidget*&, void (MainWindow1::*)(QString), FirstTab*, void (FirstTab::*)(QString))'
82 | connect(parent, &MainWindow1::loadDateSig, this, &FirstTab::loadDate);
| ~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In file included from C:/Qt/6.4.1/mingw_64/include/QtCore/qabstractanimation.h:7,
from C:/Qt/6.4.1/mingw_64/include/QtCore/QtCore:10,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QtWidgetsDepends:3,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QtWidgets:3,
from E:/Projects/Galvion/FirstTab.h:7,
from E:/Projects/Galvion/FirstTab.cpp:5:
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:201:43: note: candidate: 'static QMetaObject::Connection QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, const typename QtPrivate::FunctionPointer<Func2>::Object*, Func2, Qt::ConnectionType) [with Func1 = void (MainWindow1::*)(QString); Func2 = void (FirstTab::*)(QString); typename QtPrivate::FunctionPointer<Func>::Object = MainWindow1; typename QtPrivate::FunctionPointer<Func2>::Object = FirstTab]' (near match)
201 | static inline QMetaObject::Connection connect(const typename QtPrivate::FunctionPointer<Func1>::Object *sender, Func1 signal,
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:201:43: note: conversion of argument 1 would be ill-formed:
E:/Projects/Galvion/FirstTab.cpp:82:13: error: invalid conversion from 'QWidget*' to 'const Object*' {aka 'const MainWindow1*'} [-fpermissive]
82 | connect(parent, &MainWindow1::loadDateSig, this, &FirstTab::loadDate);
| ^~~~~~
| |
| QWidget*
In file included from C:/Qt/6.4.1/mingw_64/include/QtCore/qabstractanimation.h:7,
from C:/Qt/6.4.1/mingw_64/include/QtCore/QtCore:10,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QtWidgetsDepends:3,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QtWidgets:3,
from E:/Projects/Galvion/FirstTab.h:7,
from E:/Projects/Galvion/FirstTab.cpp:5:
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:233:13: note: candidate: 'template<class Func1, class Func2> static typename std::enable_if<((int)(QtPrivate::FunctionPointer<Func2>::ArgumentCount) >= 0), QMetaObject::Connection>::type QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, Func2)'
233 | connect(const typename QtPrivate::FunctionPointer<Func1>::Object *sender, Func1 signal, Func2 slot)
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:233:13: note: template argument deduction/substitution failed:
E:/Projects/Galvion/FirstTab.cpp:82:12: note: candidate expects 3 arguments, 4 provided
82 | connect(parent, &MainWindow1::loadDateSig, this, &FirstTab::loadDate);
| ~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In file included from C:/Qt/6.4.1/mingw_64/include/QtCore/qabstractanimation.h:7,
from C:/Qt/6.4.1/mingw_64/include/QtCore/QtCore:10,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QtWidgetsDepends:3,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QtWidgets:3,
from E:/Projects/Galvion/FirstTab.h:7,
from E:/Projects/Galvion/FirstTab.cpp:5:
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:242:13: note: candidate: 'template<class Func1, class Func2> static typename std::enable_if<(((int)(QtPrivate::FunctionPointer<Func2>::ArgumentCount) >= 0) && (! QtPrivate::FunctionPointer<Func2>::IsPointerToMemberFunction)), QMetaObject::Connection>::type QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, const QObject*, Func2, Qt::ConnectionType)'
242 | connect(const typename QtPrivate::FunctionPointer<Func1>::Object *sender, Func1 signal, const QObject *context, Func2 slot,
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:242:13: note: template argument deduction/substitution failed:
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h: In substitution of 'template<class Func1, class Func2> static typename std::enable_if<(((int)(QtPrivate::FunctionPointer<Func2>::ArgumentCount) >= 0) && (! QtPrivate::FunctionPointer<Func2>::IsPointerToMemberFunction)), QMetaObject::Connection>::type QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, const QObject*, Func2, Qt::ConnectionType) [with Func1 = void (MainWindow1::*)(QString); Func2 = void (FirstTab::*)(QString)]':
E:/Projects/Galvion/FirstTab.cpp:82:12: required from here
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:242:13: error: no type named 'type' in 'struct std::enable_if<false, QMetaObject::Connection>'
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:276:13: note: candidate: 'template<class Func1, class Func2> static typename std::enable_if<((QtPrivate::FunctionPointer<Func2>::ArgumentCount == -1) && (! is_convertible_v<Func2, const char*>)), QMetaObject::Connection>::type QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, Func2)'
276 | connect(const typename QtPrivate::FunctionPointer<Func1>::Object *sender, Func1 signal, Func2 slot)
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:276:13: note: template argument deduction/substitution failed:
E:/Projects/Galvion/FirstTab.cpp:82:12: note: candidate expects 3 arguments, 4 provided
82 | connect(parent, &MainWindow1::loadDateSig, this, &FirstTab::loadDate);
| ~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In file included from C:/Qt/6.4.1/mingw_64/include/QtCore/qabstractanimation.h:7,
from C:/Qt/6.4.1/mingw_64/include/QtCore/QtCore:10,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QtWidgetsDepends:3,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QtWidgets:3,
from E:/Projects/Galvion/FirstTab.h:7,
from E:/Projects/Galvion/FirstTab.cpp:5:
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:287:13: note: candidate: 'template<class Func1, class Func2> static typename std::enable_if<((QtPrivate::FunctionPointer<Func2>::ArgumentCount == -1) && (! is_convertible_v<Func2, const char*>)), QMetaObject::Connection>::type QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, const QObject*, Func2, Qt::ConnectionType)'
287 | connect(const typename QtPrivate::FunctionPointer<Func1>::Object *sender, Func1 signal, const QObject *context, Func2 slot,
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:287:13: note: template argument deduction/substitution failed:
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h: In substitution of 'template<class Func1, class Func2> static typename std::enable_if<((QtPrivate::FunctionPointer<Func2>::ArgumentCount == -1) && (! is_convertible_v<Func2, const char*>)), QMetaObject::Connection>::type QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, const QObject*, Func2, Qt::ConnectionType) [with Func1 = void (MainWindow1::*)(QString); Func2 = void (FirstTab::*)(QString)]':
E:/Projects/Galvion/FirstTab.cpp:82:12: required from here
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:287:13: error: no type named 'type' in 'struct std::enable_if<false, QMetaObject::Connection>'
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:181:36: note: candidate: 'static QMetaObject::Connection QObject::connect(const QObject*, const char*, const QObject*, const char*, Qt::ConnectionType)'
181 | static QMetaObject::Connection connect(const QObject *sender, const char *signal,
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:181:79: note: no known conversion for argument 2 from 'void (MainWindow1::*)(QString)' to 'const char*'
181 | static QMetaObject::Connection connect(const QObject *sender, const char *signal,
| ~~~~~~~~~~~~^~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:184:36: note: candidate: 'static QMetaObject::Connection QObject::connect(const QObject*, const QMetaMethod&, const QObject*, const QMetaMethod&, Qt::ConnectionType)'
184 | static QMetaObject::Connection connect(const QObject *sender, const QMetaMethod &signal,
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:184:86: note: no known conversion for argument 2 from 'void (MainWindow1::*)(QString)' to 'const QMetaMethod&'
184 | static QMetaObject::Connection connect(const QObject *sender, const QMetaMethod &signal,
| ~~~~~~~~~~~~~~~~~~~^~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:432:32: note: candidate: 'QMetaObject::Connection QObject::connect(const QObject*, const char*, const char*, Qt::ConnectionType) const'
432 | inline QMetaObject::Connection QObject::connect(const QObject *asender, const char *asignal,
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:432:85: note: no known conversion for argument 2 from 'void (MainWindow1::*)(QString)' to 'const char*'
432 | inline QMetaObject::Connection QObject::connect(const QObject *asender, const char *asignal,
| ~~~~~~~~~~~~^~~~~~~
[3/4] Building CXX object CMakeFiles/Galvion.dir/mainwindow1.cpp.obj
ninja: build stopped: subcommand failed.
Edit: I forgot to mention that the QWidget FirstTab has an object name, so it should be finding it correctly
A:
Reading the error message may be tricky sometimes. The key is:
invalid conversion from 'QWidget*' to 'const Object*' {aka 'const FirstTab*'}
C++ doesn't allow automatic conversion from a base class (QWidget) to a derived class (FirstTab) as not all QWidgets are FirstTabs. So you should perform the conversion yourself. In general this could be safely achieved using qobject_cast, but in this case requesting a FirstTab instance instead of a general QWidget seems to be the best approach:
connect(this, &MainWindow1::loadDateSig, mainWidget->findChild<FirstTab *>("tab1"), &FirstTab::loadDate);
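If the connection is made from the child side instead (the second attempt in the question), the same rule applies: the parent arrives as a QWidget*, so it has to be cast down before Qt can resolve the member-function pointer. A minimal sketch of that variant, assuming FirstTab's constructor receives the MainWindow1 instance as its QWidget *parent argument and that FirstTab.cpp includes mainwindow1.h:
// qobject_cast returns nullptr if parent is not actually a MainWindow1,
// so guard the connect call.
if (auto *mainWin = qobject_cast<MainWindow1 *>(parent))
    connect(mainWin, &MainWindow1::loadDateSig, this, &FirstTab::loadDate);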
| My QT connect works from parent to child, but not vice versa | I am making a QT application. I have a MainWindow1 class and a FirstTab class. The MainWindow1 class is a QMainWindow and inside I created this signal ->
mainwindow1.h
signals:
void loadDateSig(QString strDate);
And in FirstTab I created this slot->
FirstTab.h
public slots:
void loadDate(QString date);
Q_Object is defined in both MainWindow1 and in FirstTab. Now in my MainWindow1 constructor, this is how I start ->
mainwindow1.cpp
MainWindow1::MainWindow1()
: mainWidget(new QTabWidget) {
setCentralWidget(mainWidget);
mainWidget->addTab(new FirstTab(mainWidget), "First");
createActions();
createStatusBar();
And below that, I start making some connections. I connect a few items to change the status that the document was modified. You can see them here, and these work with no issues ->
connect(mainWidget->findChild<QTimeEdit *>("timeEdit"), &QTimeEdit::editingFinished,
this, &MainWindow1::documentWasModified);
connect(mainWidget->findChild<QDateEdit *>("dateEdit"), &QTimeEdit::editingFinished,
this, &MainWindow1::documentWasModified);
connect(mainWidget->findChild<QLineEdit *>("shooterEdit"), &QLineEdit::textEdited,
this, &MainWindow1::documentWasModified);
connect(mainWidget->findChild<QLineEdit *>("recorderEdit"), &QLineEdit::textEdited,
this, &MainWindow1::documentWasModified);
connect(mainWidget->findChild<QDoubleSpinBox *>("tempCSpin"), &QDoubleSpinBox::editingFinished,
this, &MainWindow1::documentWasModified);
connect(mainWidget->findChild<QDoubleSpinBox *>("tempFSpin"), &QDoubleSpinBox::editingFinished,
this, &MainWindow1::documentWasModified);
Now I try to connect my loadDateSig and loadDate. I have tried both creating the connect in the child, and in the parent class. But both give me an error. I tried to connect similar to how I connected previosly ->
connect(this, &MainWindow1::loadDateSig, mainWidget->findChild<QWidget *>("tab1"), &FirstTab::loadDate);
However, this doesn't seem to work. I get these errors ->
====================[ Build | all | Debug ]=====================================
"E:\Program Files (x86)\CLion 2022.2.1\bin\cmake\win\bin\cmake.exe" --build E:\Projects\Galvion\cmake-build-debug --target all -j 9
[1/4] Automatic MOC and UIC for target Galvion
[2/3] Building CXX object CMakeFiles/Galvion.dir/mainwindow1.cpp.obj
FAILED: CMakeFiles/Galvion.dir/mainwindow1.cpp.obj
E:\PROGRA~1\CLION2~1.1\bin\mingw\bin\G__~1.EXE -DMINGW_HAS_SECURE_API=1 -DQT_CORE_LIB -DQT_GUI_LIB -DQT_WIDGETS_LIB -DUNICODE -DWIN32 -DWIN64 -D_ENABLE_EXTENDED_ALIGNED_STORAGE -D_UNICODE -D_WIN64 -IE:/Projects/Galvion/cmake-build-debug -IE:/Projects/Galvion -IE:/Projects/Galvion/cmake-build-debug/Galvion_autogen/include -isystem C:/Qt/6.4.1/mingw_64/include/QtCore -isystem C:/Qt/6.4.1/mingw_64/include -isystem C:/Qt/6.4.1/mingw_64/mkspecs/win32-g++ -isystem C:/Qt/6.4.1/mingw_64/include/QtGui -isystem C:/Qt/6.4.1/mingw_64/include/QtWidgets -g -MD -MT CMakeFiles/Galvion.dir/mainwindow1.cpp.obj -MF CMakeFiles\Galvion.dir\mainwindow1.cpp.obj.d -o CMakeFiles/Galvion.dir/mainwindow1.cpp.obj -c E:/Projects/Galvion/mainwindow1.cpp
E:/Projects/Galvion/mainwindow1.cpp: In constructor 'MainWindow1::MainWindow1()':
E:/Projects/Galvion/mainwindow1.cpp:26:12: error: no matching function for call to 'MainWindow1::connect(MainWindow1*, void (MainWindow1::*)(QString), QWidget*, void (FirstTab::*)(QString))'
26 | connect(this, &MainWindow1::loadDateSig, mainWidget->findChild<QWidget *>("tab1"), &FirstTab::loadDate);
| ~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In file included from C:/Qt/6.4.1/mingw_64/include/QtCore/qabstractanimation.h:7,
from C:/Qt/6.4.1/mingw_64/include/QtCore/QtCore:10,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QtWidgetsDepends:3,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QTWidgets:3,
from E:/Projects/Galvion/mainwindow1.h:7,
from E:/Projects/Galvion/mainwindow1.cpp:1:
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:201:43: note: candidate: 'static QMetaObject::Connection QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, const typename QtPrivate::FunctionPointer<Func2>::Object*, Func2, Qt::ConnectionType) [with Func1 = void (MainWindow1::*)(QString); Func2 = void (FirstTab::*)(QString); typename QtPrivate::FunctionPointer<Func>::Object = MainWindow1; typename QtPrivate::FunctionPointer<Func2>::Object = FirstTab]' (near match)
201 | static inline QMetaObject::Connection connect(const typename QtPrivate::FunctionPointer<Func1>::Object *sender, Func1 signal,
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:201:43: note: conversion of argument 3 would be ill-formed:
E:/Projects/Galvion/mainwindow1.cpp:26:78: error: invalid conversion from 'QWidget*' to 'const Object*' {aka 'const FirstTab*'} [-fpermissive]
26 | connect(this, &MainWindow1::loadDateSig, mainWidget->findChild<QWidget *>("tab1"), &FirstTab::loadDate);
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~
| |
| QWidget*
In file included from C:/Qt/6.4.1/mingw_64/include/QtCore/qabstractanimation.h:7,
from C:/Qt/6.4.1/mingw_64/include/QtCore/QtCore:10,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QtWidgetsDepends:3,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QTWidgets:3,
from E:/Projects/Galvion/mainwindow1.h:7,
from E:/Projects/Galvion/mainwindow1.cpp:1:
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:233:13: note: candidate: 'template<class Func1, class Func2> static typename std::enable_if<((int)(QtPrivate::FunctionPointer<Func2>::ArgumentCount) >= 0), QMetaObject::Connection>::type QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, Func2)'
233 | connect(const typename QtPrivate::FunctionPointer<Func1>::Object *sender, Func1 signal, Func2 slot)
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:233:13: note: template argument deduction/substitution failed:
E:/Projects/Galvion/mainwindow1.cpp:26:12: note: candidate expects 3 arguments, 4 provided
26 | connect(this, &MainWindow1::loadDateSig, mainWidget->findChild<QWidget *>("tab1"), &FirstTab::loadDate);
| ~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In file included from C:/Qt/6.4.1/mingw_64/include/QtCore/qabstractanimation.h:7,
from C:/Qt/6.4.1/mingw_64/include/QtCore/QtCore:10,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QtWidgetsDepends:3,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QTWidgets:3,
from E:/Projects/Galvion/mainwindow1.h:7,
from E:/Projects/Galvion/mainwindow1.cpp:1:
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:242:13: note: candidate: 'template<class Func1, class Func2> static typename std::enable_if<(((int)(QtPrivate::FunctionPointer<Func2>::ArgumentCount) >= 0) && (! QtPrivate::FunctionPointer<Func2>::IsPointerToMemberFunction)), QMetaObject::Connection>::type QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, const QObject*, Func2, Qt::ConnectionType)'
242 | connect(const typename QtPrivate::FunctionPointer<Func1>::Object *sender, Func1 signal, const QObject *context, Func2 slot,
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:242:13: note: template argument deduction/substitution failed:
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h: In substitution of 'template<class Func1, class Func2> static typename std::enable_if<(((int)(QtPrivate::FunctionPointer<Func2>::ArgumentCount) >= 0) && (! QtPrivate::FunctionPointer<Func2>::IsPointerToMemberFunction)), QMetaObject::Connection>::type QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, const QObject*, Func2, Qt::ConnectionType) [with Func1 = void (MainWindow1::*)(QString); Func2 = void (FirstTab::*)(QString)]':
E:/Projects/Galvion/mainwindow1.cpp:26:12: required from here
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:242:13: error: no type named 'type' in 'struct std::enable_if<false, QMetaObject::Connection>'
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:276:13: note: candidate: 'template<class Func1, class Func2> static typename std::enable_if<((QtPrivate::FunctionPointer<Func2>::ArgumentCount == -1) && (! is_convertible_v<Func2, const char*>)), QMetaObject::Connection>::type QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, Func2)'
276 | connect(const typename QtPrivate::FunctionPointer<Func1>::Object *sender, Func1 signal, Func2 slot)
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:276:13: note: template argument deduction/substitution failed:
E:/Projects/Galvion/mainwindow1.cpp:26:12: note: candidate expects 3 arguments, 4 provided
26 | connect(this, &MainWindow1::loadDateSig, mainWidget->findChild<QWidget *>("tab1"), &FirstTab::loadDate);
| ~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In file included from C:/Qt/6.4.1/mingw_64/include/QtCore/qabstractanimation.h:7,
from C:/Qt/6.4.1/mingw_64/include/QtCore/QtCore:10,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QtWidgetsDepends:3,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QTWidgets:3,
from E:/Projects/Galvion/mainwindow1.h:7,
from E:/Projects/Galvion/mainwindow1.cpp:1:
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:287:13: note: candidate: 'template<class Func1, class Func2> static typename std::enable_if<((QtPrivate::FunctionPointer<Func2>::ArgumentCount == -1) && (! is_convertible_v<Func2, const char*>)), QMetaObject::Connection>::type QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, const QObject*, Func2, Qt::ConnectionType)'
287 | connect(const typename QtPrivate::FunctionPointer<Func1>::Object *sender, Func1 signal, const QObject *context, Func2 slot,
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:287:13: note: template argument deduction/substitution failed:
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h: In substitution of 'template<class Func1, class Func2> static typename std::enable_if<((QtPrivate::FunctionPointer<Func2>::ArgumentCount == -1) && (! is_convertible_v<Func2, const char*>)), QMetaObject::Connection>::type QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, const QObject*, Func2, Qt::ConnectionType) [with Func1 = void (MainWindow1::*)(QString); Func2 = void (FirstTab::*)(QString)]':
E:/Projects/Galvion/mainwindow1.cpp:26:12: required from here
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:287:13: error: no type named 'type' in 'struct std::enable_if<false, QMetaObject::Connection>'
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:181:36: note: candidate: 'static QMetaObject::Connection QObject::connect(const QObject*, const char*, const QObject*, const char*, Qt::ConnectionType)'
181 | static QMetaObject::Connection connect(const QObject *sender, const char *signal,
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:181:79: note: no known conversion for argument 2 from 'void (MainWindow1::*)(QString)' to 'const char*'
181 | static QMetaObject::Connection connect(const QObject *sender, const char *signal,
| ~~~~~~~~~~~~^~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:184:36: note: candidate: 'static QMetaObject::Connection QObject::connect(const QObject*, const QMetaMethod&, const QObject*, const QMetaMethod&, Qt::ConnectionType)'
184 | static QMetaObject::Connection connect(const QObject *sender, const QMetaMethod &signal,
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:184:86: note: no known conversion for argument 2 from 'void (MainWindow1::*)(QString)' to 'const QMetaMethod&'
184 | static QMetaObject::Connection connect(const QObject *sender, const QMetaMethod &signal,
| ~~~~~~~~~~~~~~~~~~~^~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:432:32: note: candidate: 'QMetaObject::Connection QObject::connect(const QObject*, const char*, const char*, Qt::ConnectionType) const'
432 | inline QMetaObject::Connection QObject::connect(const QObject *asender, const char *asignal,
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:432:85: note: no known conversion for argument 2 from 'void (MainWindow1::*)(QString)' to 'const char*'
432 | inline QMetaObject::Connection QObject::connect(const QObject *asender, const char *asignal,
| ~~~~~~~~~~~~^~~~~~~
ninja: build stopped: subcommand failed.
Could anyone help me figure out why this happens?
I tried to create the connect in the FirstTab class. Changing the parent in mainwindow1.cpp form mainWidget parent to this so the child class can access the signal ->
mainWidget->addTab(new FirstTab(this), "First");
And then I create this connect in FirstTab.cpp ->
connect(parent, &MainWindow1::loadDateSig, this, &FirstTab::loadDate);
I get this error when trying to build ->
====================[ Build | all | Debug ]=====================================
"E:\Program Files (x86)\CLion 2022.2.1\bin\cmake\win\bin\cmake.exe" --build E:\Projects\Galvion\cmake-build-debug --target all -j 9
[1/5] Automatic MOC and UIC for target Galvion
[2/4] Building CXX object CMakeFiles/Galvion.dir/FirstTab.cpp.obj
FAILED: CMakeFiles/Galvion.dir/FirstTab.cpp.obj
E:\PROGRA~1\CLION2~1.1\bin\mingw\bin\G__~1.EXE -DMINGW_HAS_SECURE_API=1 -DQT_CORE_LIB -DQT_GUI_LIB -DQT_WIDGETS_LIB -DUNICODE -DWIN32 -DWIN64 -D_ENABLE_EXTENDED_ALIGNED_STORAGE -D_UNICODE -D_WIN64 -IE:/Projects/Galvion/cmake-build-debug -IE:/Projects/Galvion -IE:/Projects/Galvion/cmake-build-debug/Galvion_autogen/include -isystem C:/Qt/6.4.1/mingw_64/include/QtCore -isystem C:/Qt/6.4.1/mingw_64/include -isystem C:/Qt/6.4.1/mingw_64/mkspecs/win32-g++ -isystem C:/Qt/6.4.1/mingw_64/include/QtGui -isystem C:/Qt/6.4.1/mingw_64/include/QtWidgets -g -MD -MT CMakeFiles/Galvion.dir/FirstTab.cpp.obj -MF CMakeFiles\Galvion.dir\FirstTab.cpp.obj.d -o CMakeFiles/Galvion.dir/FirstTab.cpp.obj -c E:/Projects/Galvion/FirstTab.cpp
E:/Projects/Galvion/FirstTab.cpp: In constructor 'FirstTab::FirstTab(QWidget*)':
E:/Projects/Galvion/FirstTab.cpp:82:12: error: no matching function for call to 'FirstTab::connect(QWidget*&, void (MainWindow1::*)(QString), FirstTab*, void (FirstTab::*)(QString))'
82 | connect(parent, &MainWindow1::loadDateSig, this, &FirstTab::loadDate);
| ~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In file included from C:/Qt/6.4.1/mingw_64/include/QtCore/qabstractanimation.h:7,
from C:/Qt/6.4.1/mingw_64/include/QtCore/QtCore:10,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QtWidgetsDepends:3,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QtWidgets:3,
from E:/Projects/Galvion/FirstTab.h:7,
from E:/Projects/Galvion/FirstTab.cpp:5:
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:201:43: note: candidate: 'static QMetaObject::Connection QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, const typename QtPrivate::FunctionPointer<Func2>::Object*, Func2, Qt::ConnectionType) [with Func1 = void (MainWindow1::*)(QString); Func2 = void (FirstTab::*)(QString); typename QtPrivate::FunctionPointer<Func>::Object = MainWindow1; typename QtPrivate::FunctionPointer<Func2>::Object = FirstTab]' (near match)
201 | static inline QMetaObject::Connection connect(const typename QtPrivate::FunctionPointer<Func1>::Object *sender, Func1 signal,
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:201:43: note: conversion of argument 1 would be ill-formed:
E:/Projects/Galvion/FirstTab.cpp:82:13: error: invalid conversion from 'QWidget*' to 'const Object*' {aka 'const MainWindow1*'} [-fpermissive]
82 | connect(parent, &MainWindow1::loadDateSig, this, &FirstTab::loadDate);
| ^~~~~~
| |
| QWidget*
In file included from C:/Qt/6.4.1/mingw_64/include/QtCore/qabstractanimation.h:7,
from C:/Qt/6.4.1/mingw_64/include/QtCore/QtCore:10,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QtWidgetsDepends:3,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QtWidgets:3,
from E:/Projects/Galvion/FirstTab.h:7,
from E:/Projects/Galvion/FirstTab.cpp:5:
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:233:13: note: candidate: 'template<class Func1, class Func2> static typename std::enable_if<((int)(QtPrivate::FunctionPointer<Func2>::ArgumentCount) >= 0), QMetaObject::Connection>::type QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, Func2)'
233 | connect(const typename QtPrivate::FunctionPointer<Func1>::Object *sender, Func1 signal, Func2 slot)
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:233:13: note: template argument deduction/substitution failed:
E:/Projects/Galvion/FirstTab.cpp:82:12: note: candidate expects 3 arguments, 4 provided
82 | connect(parent, &MainWindow1::loadDateSig, this, &FirstTab::loadDate);
| ~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In file included from C:/Qt/6.4.1/mingw_64/include/QtCore/qabstractanimation.h:7,
from C:/Qt/6.4.1/mingw_64/include/QtCore/QtCore:10,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QtWidgetsDepends:3,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QtWidgets:3,
from E:/Projects/Galvion/FirstTab.h:7,
from E:/Projects/Galvion/FirstTab.cpp:5:
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:242:13: note: candidate: 'template<class Func1, class Func2> static typename std::enable_if<(((int)(QtPrivate::FunctionPointer<Func2>::ArgumentCount) >= 0) && (! QtPrivate::FunctionPointer<Func2>::IsPointerToMemberFunction)), QMetaObject::Connection>::type QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, const QObject*, Func2, Qt::ConnectionType)'
242 | connect(const typename QtPrivate::FunctionPointer<Func1>::Object *sender, Func1 signal, const QObject *context, Func2 slot,
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:242:13: note: template argument deduction/substitution failed:
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h: In substitution of 'template<class Func1, class Func2> static typename std::enable_if<(((int)(QtPrivate::FunctionPointer<Func2>::ArgumentCount) >= 0) && (! QtPrivate::FunctionPointer<Func2>::IsPointerToMemberFunction)), QMetaObject::Connection>::type QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, const QObject*, Func2, Qt::ConnectionType) [with Func1 = void (MainWindow1::*)(QString); Func2 = void (FirstTab::*)(QString)]':
E:/Projects/Galvion/FirstTab.cpp:82:12: required from here
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:242:13: error: no type named 'type' in 'struct std::enable_if<false, QMetaObject::Connection>'
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:276:13: note: candidate: 'template<class Func1, class Func2> static typename std::enable_if<((QtPrivate::FunctionPointer<Func2>::ArgumentCount == -1) && (! is_convertible_v<Func2, const char*>)), QMetaObject::Connection>::type QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, Func2)'
276 | connect(const typename QtPrivate::FunctionPointer<Func1>::Object *sender, Func1 signal, Func2 slot)
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:276:13: note: template argument deduction/substitution failed:
E:/Projects/Galvion/FirstTab.cpp:82:12: note: candidate expects 3 arguments, 4 provided
82 | connect(parent, &MainWindow1::loadDateSig, this, &FirstTab::loadDate);
| ~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In file included from C:/Qt/6.4.1/mingw_64/include/QtCore/qabstractanimation.h:7,
from C:/Qt/6.4.1/mingw_64/include/QtCore/QtCore:10,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QtWidgetsDepends:3,
from C:/Qt/6.4.1/mingw_64/include/QtWidgets/QtWidgets:3,
from E:/Projects/Galvion/FirstTab.h:7,
from E:/Projects/Galvion/FirstTab.cpp:5:
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:287:13: note: candidate: 'template<class Func1, class Func2> static typename std::enable_if<((QtPrivate::FunctionPointer<Func2>::ArgumentCount == -1) && (! is_convertible_v<Func2, const char*>)), QMetaObject::Connection>::type QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, const QObject*, Func2, Qt::ConnectionType)'
287 | connect(const typename QtPrivate::FunctionPointer<Func1>::Object *sender, Func1 signal, const QObject *context, Func2 slot,
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:287:13: note: template argument deduction/substitution failed:
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h: In substitution of 'template<class Func1, class Func2> static typename std::enable_if<((QtPrivate::FunctionPointer<Func2>::ArgumentCount == -1) && (! is_convertible_v<Func2, const char*>)), QMetaObject::Connection>::type QObject::connect(const typename QtPrivate::FunctionPointer<Func>::Object*, Func1, const QObject*, Func2, Qt::ConnectionType) [with Func1 = void (MainWindow1::*)(QString); Func2 = void (FirstTab::*)(QString)]':
E:/Projects/Galvion/FirstTab.cpp:82:12: required from here
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:287:13: error: no type named 'type' in 'struct std::enable_if<false, QMetaObject::Connection>'
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:181:36: note: candidate: 'static QMetaObject::Connection QObject::connect(const QObject*, const char*, const QObject*, const char*, Qt::ConnectionType)'
181 | static QMetaObject::Connection connect(const QObject *sender, const char *signal,
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:181:79: note: no known conversion for argument 2 from 'void (MainWindow1::*)(QString)' to 'const char*'
181 | static QMetaObject::Connection connect(const QObject *sender, const char *signal,
| ~~~~~~~~~~~~^~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:184:36: note: candidate: 'static QMetaObject::Connection QObject::connect(const QObject*, const QMetaMethod&, const QObject*, const QMetaMethod&, Qt::ConnectionType)'
184 | static QMetaObject::Connection connect(const QObject *sender, const QMetaMethod &signal,
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:184:86: note: no known conversion for argument 2 from 'void (MainWindow1::*)(QString)' to 'const QMetaMethod&'
184 | static QMetaObject::Connection connect(const QObject *sender, const QMetaMethod &signal,
| ~~~~~~~~~~~~~~~~~~~^~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:432:32: note: candidate: 'QMetaObject::Connection QObject::connect(const QObject*, const char*, const char*, Qt::ConnectionType) const'
432 | inline QMetaObject::Connection QObject::connect(const QObject *asender, const char *asignal,
| ^~~~~~~
C:/Qt/6.4.1/mingw_64/include/QtCore/qobject.h:432:85: note: no known conversion for argument 2 from 'void (MainWindow1::*)(QString)' to 'const char*'
432 | inline QMetaObject::Connection QObject::connect(const QObject *asender, const char *asignal,
| ~~~~~~~~~~~~^~~~~~~
[3/4] Building CXX object CMakeFiles/Galvion.dir/mainwindow1.cpp.obj
ninja: build stopped: subcommand failed.
Edit: I forgot to mention that the QWidget FirstTab has an object name, so it should be finding it correctly
| [
"Reading the error message may be tricky sometimes. The key is:\n\ninvalid conversion from 'QWidget*' to 'const Object*' {aka 'const FirstTab*'}\n\nC++ doesn't allow automatic conversion from a base class (QWidget) to a derived class (FirstTab) as not all QWidgets are FirstTabs. So you should perform the conversion yourself. In general this could be safely achieved using qobject_cast, but in this case requesting a FirstTab instance instead of a general QWidget seems to be the best approach:\nconnect(this, &MainWindow1::loadDateSig, mainWidget->findChild<FirstTab *>(\"tab1\"), &FirstTab::loadDate);\n\n"
] | [
0
] | [] | [] | [
"c++",
"connect",
"qt",
"qt6"
] | stackoverflow_0074675185_c++_connect_qt_qt6.txt |
Q:
"Google Translate internal error" when using the GOOGLETRANSLATE function in Google Sheets
I have a large spreadsheet with a list of English phrases/words in one column and then another column where all of those are translated into another language using the GOOGLETRANSLATE function. One example of such a row:
The formula is =GOOGLETRANSLATE(G786, "en", "nl")
When I click on it a box appears saying "Error" and then beneath that
"Google Translate internal error."
I am unsure what the issue is and how I can solve it. The strange thing is that if I replace the cell reference in the formula with a plain string like "Hello" and press Enter, it translates to Dutch correctly, and if I then re-insert the same formula shown in the picture, with the cell reference, it also translates as expected.
EN                            NL
Confirm before proceeding     =GOOGLETRANSLATE(G777, "en", "nl")
A:
try in H2:
=BYROW(G2:INDEX(G:G; MAX((G:G<>"")*ROW(G:G)));
    LAMBDA(x; GOOGLETRANSLATE(x; G1; H1)))
A:
The GOOGLETRANSLATE function appears to raise this internal error when there is more than one possible meaning of a word or sentence.
e.g. when I type "one" (1), which also has the meaning of "united" etc., it creates an error, but it translates "one dog" correctly.
To get a list of verbs, I wrote e.g. "to love". It created an error, it seems because "to love" is not just an infinitive but can also carry the meaning "in order to love".
So it seems the GOOGLETRANSLATE function works much better with sentences where the translation is unique than it does with single words or short phrases.
Writing a period/full stop "." after a short sentence or phrase also removes this internal error.
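A quick sketch of that period workaround (the cell reference A2 is hypothetical, not from the original sheet):
=GOOGLETRANSLATE(A2 & ".", "en", "nl")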
A:
This seems to be a new problem because I've been using GOOGLETRANSLATE for about five years on lots of spreadsheets (vocabulary translations with thousands of entries). They have had a habit of timing out and becoming unusable. This error seems to have replaced the timing out. The period seems to have worked, so I just need to REGEXREPLACE it in the translation, I guess. And I need to do some more copy-and-paste-values-only once I have good translations.
"=REGEXREPLACE(GOOGLETRANSLATE(concatenate(E430,"."),"en","es"),"\.","")"
adds the period, translates, and then removes it (but it strips the \ from the comments so I can't show it here!)
| "Google Translate internal error" when using the GOOGLETRANSLATE function in Google Sheets | I have a large spreadsheet with a list of English phrases/words in one column and then another column where all of those are translated into another language using the GOOGLETRANSLATE function. One example of such a row:
The formula is =GOOGLETRANSLATE(G786, "en", "nl")
When I click on it a box appears saying "Error" and then beneath that
"Google Translate internal error."
I am unsure what the issue is and how I can solve it. The strange thing is that if I replace the cell reference in the formula with a plain string like "Hello" and press Enter, it translates to Dutch correctly, and if I then re-insert the same formula shown in the picture, with the cell reference, it also translates as expected.
EN                            NL
Confirm before proceeding     =GOOGLETRANSLATE(G777, "en", "nl")
| [
"try in H2:\n=BYROW(G2:INDEX(G:G; MAX((G:G<>\"\")*ROW(G:G))); \n LAMBDA(x; GOOGLETRANSLATE(x; G1; H1))\n\n",
"The Googletranslate function appears to create this internal error when there is more than one possible meaning of a word or sentence\ne.g. when I type \"one\" (1). which also has the meaning of united etc. it creates an error but it translates \"one dog\" correctly.\nTo get a list of verbs, I wrote e.g. \"to love\" . It created an error , it seems because \"to love\" is not just an infinitive but can can include the meaning \"in order to love\" .\nSo it seems the googletranslate function works much better with sentences where the translation is unique, than it does with single or a few words.\nWriting period/ full stops \".\" after a short sentence or phrase also removes this internal error\n",
"This seems to be a new problem because I've been using GOOGLETRANSLATE for about five years on lots of spreadsheets (vocabulary translations with thousands of entries). They have had a habit of timing out and becoming unusable. This error seems to have replaced the timing out. The period seems to have worked, so I just need to REGEXREPLACE it in the translation, I guess. And I need to do some more copy-and-paste-values-only once I have good translations.\n\"=REGEXREPLACE(GOOGLETRANSLATE(concatenate(E430,\".\"),\"en\",\"es\"),\"\\.\",\"\")\"\nadds the period, translates, and then removes it (but it strips the \\ from the comments so I can't show it here!)\n"
] | [
1,
0,
0
] | [] | [] | [
"google_sheets",
"google_sheets_formula",
"google_translate",
"internal_server_error"
] | stackoverflow_0074267796_google_sheets_google_sheets_formula_google_translate_internal_server_error.txt |
Q:
Transform and fill a dataframe depending on occurrence of values within the dataframe
I have a dataframe such as :
Names1 Gene_name Status
SP1 GENE1 0
SP1 GENE1 1
SP1 GENE1 1
SP1 GENE1 2
SP1 GENE1 2
SP1 GENE2 0
SP3 GENE2 0
SP1 GENE2 1
SP2 GENE2 2
SP4 GENE3 1
SP4 GENE3 2
SP5 GENE3 0
SP5 GENE3 0
Then I would like to fill a new dataframe where each Gene_name is a column, and each Names is a row :
Names GENE1 GENE2 GENE3
SP1
SP2
SP3
SP4
SP5
and fill the cell values depending on the Status for each Names group
if only 0 > value = 0
if only 1 > value = 1
if both 0 & 1 > value = 0-1
if both 0 & 2 > value = 0-2
if both 1 & 2 > value = 1-2
if both 0 & 1 & 2 > value = 0-1-2
So for example GENE1 in SP1 both present a 0,1 and 2 status, so I fill 0-1-2 within the cell:
Names GENE1 GENE2 GENE3
SP1 0-1-2
SP2
SP3
SP4
SP5
then, SP2,SP3,SP4 and SP5 do not have value for the GENE1, so I put NA :
Names GENE1 GENE2 GENE3
SP1 0-1-2
SP2 NA
SP3 NA
SP4 NA
SP5 NA
Then for the GENE2:
GENE2 in SP1 both present a 0 and 1 status, so I fill 0-1 within the cell:
Names GENE1 GENE2 GENE3
SP1 0-1-2 0-1
SP2 NA
SP3 NA
SP4 NA
SP5 NA
GENE2 in SP2 present only a value 2 status, so I fill 2 within the cell:
Names GENE1 GENE2 GENE3
SP1 0-1-2 0-1
SP2 NA 2
SP3 NA
SP4 NA
SP5 NA
GENE2 in SP3 present only a value 0 status, so I fill 0 within the cell:
Names GENE1 GENE2 GENE3
SP1 0-1-2 0-1
SP2 NA 2
SP3 NA 0
SP4 NA
SP5 NA
and the other Names have no GENE2 values, so I put NA:
Names GENE1 GENE2 GENE3
SP1 0-1-2 0-1
SP2 NA 2
SP3 NA 0
SP4 NA NA
SP5 NA NA
and so on...
At the end I should get a full dataframe such as :
Names GENE1 GENE2 GENE3
SP1 0-1-2 0-1 NA
SP2 NA 2 NA
SP3 NA 0 NA
SP4 NA NA 1-2
SP5 NA NA 0
Does someone have an idea please ?
Here is the dict format of the dataframe if it can helps :
{'Names1': {0: 'SP1', 1: 'SP1', 2: 'SP1', 3: 'SP1', 4: 'SP1', 5: 'SP1', 6: 'SP3', 7: 'SP1', 8: 'SP2', 9: 'SP4', 10: 'SP4', 11: 'SP5', 12: 'SP5'}, 'Gene_name': {0: 'GENE1', 1: 'GENE1', 2: 'GENE1', 3: 'GENE1', 4: 'GENE1', 5: 'GENE2', 6: 'GENE2', 7: 'GENE2', 8: 'GENE2', 9: 'GENE3', 10: 'GENE3', 11: 'GENE3', 12: 'GENE3'}, 'Status': {0: 0, 1: 1, 2: 1, 3: 2, 4: 2, 5: 0, 6: 0, 7: 1, 8: 2, 9: 1, 10: 2, 11: 0, 12: 0}}
A:
Code
g = df.groupby(['Names1', 'Gene_name'])
g['Status'].agg(lambda x: '-'.join(x.astype('str').sort_values().unique())).unstack()
output
Gene_name GENE1 GENE2 GENE3
Names1
SP1 0-1-2 0-1 NaN
SP2 NaN 2 NaN
SP3 NaN 0 NaN
SP4 NaN NaN 1-2
SP5 NaN NaN 0
make desired output
(g['Status'].agg(lambda x: '-'.join(x.astype('str').sort_values().unique()))
.unstack().rename_axis(index='Name', columns=''))
result:
GENE1 GENE2 GENE3
Name
SP1 0-1-2 0-1 NaN
SP2 NaN 2 NaN
SP3 NaN 0 NaN
SP4 NaN NaN 1-2
SP5 NaN NaN 0
A:
The above solution would be neater, but just wanted to put out an alternative solution to the same:
import numpy as np
names = df['Names1'].unique()
genes = df['Gene_name'].unique()
result_df = pd.DataFrame({'Names': names})
for gene in genes:
values = []
for name in names:
        result = '-'.join(map(str, df.loc[(df['Names1'] == name) & (df['Gene_name'] == gene), ['Status']]['Status'].to_numpy()))
if result == '':
values.append(np.nan)
else:
values.append(result)
result_df[gene] = values
result_df
Output
GENE1 GENE2 GENE3
Names
SP1 0-1-2 0-1 NaN
SP2 NaN 2 NaN
SP3 NaN 0 NaN
SP4 NaN NaN 1-2
SP5 NaN NaN 0
A:
Using pivot_table, the solution can look like this:
df.pivot_table('Status','Names1','Gene_name',
aggfunc=lambda x: '-'.join(x.astype(str).unique())).rename_axis(columns=None)
>>>
'''
GENE1 GENE2 GENE3
Names1
SP1 0-1-2 0-1 NaN
SP2 NaN 2 NaN
SP3 NaN 0 NaN
SP4 NaN NaN 1-2
SP5 NaN NaN 0
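A side note on the two unique()-based aggregations above (not part of the original answers): unique() keeps values in encounter order, so if the statuses can appear out of order in the source rows, sorting first gives a stable result, e.g.:
df.pivot_table('Status', 'Names1', 'Gene_name',
               aggfunc=lambda x: '-'.join(x.astype(str).sort_values().unique())).rename_axis(columns=None)
Lexicographic sorting is fine here because the statuses are single digits.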
| Transform and fill a dataframe depending on occurrence of values within the dataframe | I have a dataframe such as :
Names1 Gene_name Status
SP1 GENE1 0
SP1 GENE1 1
SP1 GENE1 1
SP1 GENE1 2
SP1 GENE1 2
SP1 GENE2 0
SP3 GENE2 0
SP1 GENE2 1
SP2 GENE2 2
SP4 GENE3 1
SP4 GENE3 2
SP5 GENE3 0
SP5 GENE3 0
Then I would like to fill a new dataframe where each Gene_name is a column, and each Names is a row :
Names GENE1 GENE2 GENE3
SP1
SP2
SP3
SP4
SP5
and fill the cell values depending on the Status for each Names group
if only 0 > value = 0
if only 1 > value = 1
if both 0 & 1 > value = 0-1
if both 0 & 2 > value = 0-2
if both 1 & 2 > value = 1-2
if both 0 & 1 & 2 > value = 0-1-2
So for example GENE1 in SP1 both present a 0,1 and 2 status, so I fill 0-1-2 within the cell:
Names GENE1 GENE2 GENE3
SP1 0-1-2
SP2
SP3
SP4
SP5
then, SP2,SP3,SP4 and SP5 do not have value for the GENE1, so I put NA :
Names GENE1 GENE2 GENE3
SP1 0-1-2
SP2 NA
SP3 NA
SP4 NA
SP5 NA
Then for the GENE2:
GENE2 in SP1 both present a 0 and 1 status, so I fill 0-1 within the cell:
Names GENE1 GENE2 GENE3
SP1 0-1-2 0-1
SP2 NA
SP3 NA
SP4 NA
SP5 NA
GENE2 in SP2 present only a value 2 status, so I fill 2 within the cell:
Names GENE1 GENE2 GENE3
SP1 0-1-2 0-1
SP2 NA 2
SP3 NA
SP4 NA
SP5 NA
GENE2 in SP3 present only a value 0 status, so I fill 0 within the cell:
Names GENE1 GENE2 GENE3
SP1 0-1-2 0-1
SP2 NA 2
SP3 NA 0
SP4 NA
SP5 NA
and the other Names have no GENE2 values, so I put NA:
Names GENE1 GENE2 GENE3
SP1 0-1-2 0-1
SP2 NA 2
SP3 NA 0
SP4 NA NA
SP5 NA NA
and so on...
At the end I should get a full dataframe such as :
Names GENE1 GENE2 GENE3
SP1 0-1-2 0-1 NA
SP2 NA 2 NA
SP3 NA 0 NA
SP4 NA NA 1-2
SP5 NA NA 0
Does someone have an idea please ?
Here is the dict format of the dataframe if it can helps :
{'Names1': {0: 'SP1', 1: 'SP1', 2: 'SP1', 3: 'SP1', 4: 'SP1', 5: 'SP1', 6: 'SP3', 7: 'SP1', 8: 'SP2', 9: 'SP4', 10: 'SP4', 11: 'SP5', 12: 'SP5'}, 'Gene_name': {0: 'GENE1', 1: 'GENE1', 2: 'GENE1', 3: 'GENE1', 4: 'GENE1', 5: 'GENE2', 6: 'GENE2', 7: 'GENE2', 8: 'GENE2', 9: 'GENE3', 10: 'GENE3', 11: 'GENE3', 12: 'GENE3'}, 'Status': {0: 0, 1: 1, 2: 1, 3: 2, 4: 2, 5: 0, 6: 0, 7: 1, 8: 2, 9: 1, 10: 2, 11: 0, 12: 0}}
| [
"Code\ng = df.groupby(['Names1', 'Gene_name'])\ng['Status'].agg(lambda x: '-'.join(x.astype('str').sort_values().unique())).unstack()\n\noutput\nGene_name GENE1 GENE2 GENE3\nNames1 \nSP1 0-1-2 0-1 NaN\nSP2 NaN 2 NaN\nSP3 NaN 0 NaN\nSP4 NaN NaN 1-2\nSP5 NaN NaN 0\n\n\nmake desired output\n(g['Status'].agg(lambda x: '-'.join(x.astype('str').sort_values().unique()))\n .unstack().rename_axis(index='Name', columns=''))\n\nresult:\n GENE1 GENE2 GENE3\nName \nSP1 0-1-2 0-1 NaN\nSP2 NaN 2 NaN\nSP3 NaN 0 NaN\nSP4 NaN NaN 1-2\nSP5 NaN NaN 0\n\n",
"The above solution would be neater, but just wanted to put out an alternative solution to the same:\nimport numpy as np\n\nnames = df['Names1'].unique() \ngenes = df['Gene_name'].unique() \nresult_df = pd.DataFrame({'Names': names}) \n\nfor gene in genes: \n values = []\n for name in names: \n result = '-'.join(map(str, count_df.loc[(count_df['Names1'] == name) & (count_df['Gene_name'] == gene), ['Status']]['Status'].to_numpy()))\n if result == '':\n values.append(np.nan) \n else:\n values.append(result) \n\n result_df[gene] = values \n\nresult_df \n\nOutput\n GENE1 GENE2 GENE3\nNames \nSP1 0-1-2 0-1 NaN\nSP2 NaN 2 NaN\nSP3 NaN 0 NaN\nSP4 NaN NaN 1-2\nSP5 NaN NaN 0\n\n",
"with using pivot table the solutiont can looks like this:\ndf.pivot_table('Status','Names1','Gene_name',\n aggfunc=lambda x: '-'.join(x.astype(str).unique())).rename_axis(columns=None)\n>>>\n'''\n GENE1 GENE2 GENE3\nNames1 \nSP1 0-1-2 0-1 NaN\nSP2 NaN 2 NaN\nSP3 NaN 0 NaN\nSP4 NaN NaN 1-2\nSP5 NaN NaN 0\n\n"
] | [
3,
1,
0
] | [] | [] | [
"pandas",
"python",
"python_3.x"
] | stackoverflow_0074674654_pandas_python_python_3.x.txt |
Q:
How to change the webdriver_manager path to a custom path in the Cloud Functions environment
I'm trying to create a headless web scraper on Cloud Functions. I have used Selenium to automate the driver provided by webdriver_manager.
Can you please tell me how to change the webdriver_manager cache path for this environment? Below is my code and the error I'm getting.
import os
import logging
# selenium 4
os.environ['GH_TOKEN'] = "gkjkjhjkhjhkjhuihjhgjhg"
os.environ['WDM_LOG'] = str(logging.NOTSET)
os.environ['WDM_LOCAL'] = '1'
os.environ['WDM_SSL_VERIFY'] = '0'
from selenium import webdriver
from selenium.webdriver.chrome.service import Service as ChromeService
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
def hello_world(request):
"""Responds to any HTTP request.
Args:
request (flask.Request): HTTP request object.
Returns:
The response text or any set of values that can be turned into a
Response object using
`make_response <http://flask.pocoo.org/docs/1.0/api/#flask.Flask.make_response>`.
"""
# instance of Options class allows
# us to configure Headless Chrome
options = Options()
print("options")
options.headless = True
driver = webdriver.Chrome(service=ChromeService(ChromeDriverManager("2.26", cache_valid_range=1, path = r".\\temp\\Drivers").install()
), options=options)
print("driver was initiated")
# this parameter tells Chrome that
# it should be run without UI (Headless)
# initializing webdriver for Chrome with our options
# driver = webdriver.Chrome(options=options)
# driver = webdriver.Chrome(ChromeDriverManager(path = r"/temp/data").install())
request_json = request.get_json()
if request_json and 'url' in request_json:
url = request_json['url']
driver.get('https://www.geeksforgeeks.org')
print(driver.title)
driver.close()
return f'Success!'
else:
return f'Not run'
Error logs -
Traceback (most recent call last): File "/layers/google.python.pip/pip/lib/python3.10/site-packages/flask/app.py", line 2525, in wsgi_app response = self.full_dispatch_request() File "/layers/google.python.pip/pip/lib/python3.10/site-packages/flask/app.py", line 1822, in full_dispatch_request rv = self.handle_user_exception(e) File "/layers/google.python.pip/pip/lib/python3.10/site-packages/flask/app.py", line 1820, in full_dispatch_request rv = self.dispatch_request() File "/layers/google.python.pip/pip/lib/python3.10/site-packages/flask/app.py", line 1796, in dispatch_request return self.ensure_sync(self.view_functions[rule.endpoint])(**view_args) File "/layers/google.python.pip/pip/lib/python3.10/site-packages/functions_framework/__init__.py", line 98, in view_func return function(request._get_current_object()) File "/workspace/main.py", line 28, in hello_world driver = webdriver.Chrome(service=ChromeService(ChromeDriverManager("2.26", cache_valid_range=1, path = r".\\temp\\Drivers").install() File "/layers/google.python.pip/pip/lib/python3.10/site-packages/webdriver_manager/chrome.py", line 39, in install driver_path = self._get_driver_path(self.driver) File "/layers/google.python.pip/pip/lib/python3.10/site-packages/webdriver_manager/core/manager.py", line 31, in _get_driver_path binary_path = self.driver_cache.save_file_to_cache(driver, file) File "/layers/google.python.pip/pip/lib/python3.10/site-packages/webdriver_manager/core/driver_cache.py", line 45, in save_file_to_cache archive = save_file(file, path) File "/layers/google.python.pip/pip/lib/python3.10/site-packages/webdriver_manager/core/utils.py", line 38, in save_file os.makedirs(directory, exist_ok=True) File "/layers/google.python.runtime/python/lib/python3.10/os.py", line 215, in makedirs makedirs(head, exist_ok=exist_ok) File "/layers/google.python.runtime/python/lib/python3.10/os.py", line 215, in makedirs makedirs(head, exist_ok=exist_ok) File "/layers/google.python.runtime/python/lib/python3.10/os.py", line 215, in makedirs makedirs(head, exist_ok=exist_ok)
I think the error is caused by webdriver_manager trying to save the driver to a cache at some static path. I already changed the path setting using
path = r".\\temp\\Drivers"
How to do it correctly?
A:
So I figured this out...
import os
import logging
# selenium 4
os.environ['WDM_LOG'] = str(logging.NOTSET)
from bs4 import BeautifulSoup
from selenium.webdriver.common.by import By
import time
from selenium import webdriver
from selenium.webdriver.chrome.service import Service as ChromeService
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
from selenium.webdriver import Chrome
def hello_world(request):
"""Responds to any HTTP request.
Args:
request (flask.Request): HTTP request object.
Returns:
The response text or any set of values that can be turned into a
Response object using
`make_response <http://flask.pocoo.org/docs/1.0/api/#flask.Flask.make_response>`.
"""
# instance of Options class allows
# us to configure Headless Chrome
print("driver was initiated")
# this parameter tells Chrome that
# it should be run without UI (Headless)
opts = Options()
opts.add_experimental_option("detach", True)
opts.headless= True
# initializing webdriver for Chrome with our options
driver = webdriver.Chrome(service= ChromeService(ChromeDriverManager(cache_valid_range=1).install() ), options = opts)
# chrome_driver_path = ChromeDriverManager().install()
request_json = request.get_json()
if request_json and 'url' in request_json:
# driver = webdriver.Chrome(service= chrome_driver_path, options = opts)
url = request_json['url']
driver.get(url)
driver.get(url)
# driver.find_element(By.XPATH,'//*[@id="QA0Szd"]/div/div/div[1]/div[2]/div/div[1]/div/div/div[2]/div[1]/div[1]/div[2]/div/div[1]/div[2]').click()
#to make sure content is fully loaded we can use time.sleep() after navigating to each page
import time
time.sleep(3)
#Find the total number of reviews
# total_number_of_reviews = driver.find_element('xpath','//*[@id="QA0Szd"]/div/div/div[1]/div[2]/div/div[1]/div/div/div[2]/div[9]').text.splitlines()[3]
# total_number_of_reviews = driver.find_element('xpath','//*[@id="QA0Szd"]/div/div/div[1]/div[2]/div/div[1]/div/div/div[2]/div[9]').text
# time.sleep(3)
# print(total_number_of_reviews)
# Find scroll layout
scrollable_div = driver.find_element('xpath','//*[@id="QA0Szd"]/div/div/div[1]/div[2]/div/div[1]/div/div/div[2]')
# time.sleep(3)
#Scroll as many times as necessary to load all reviews
total_reviews = int(driver.find_element('xpath', '//*[@id="QA0Szd"]/div/div/div[1]/div[2]/div/div[1]/div/div/div[2]/div[2]/div/div[2]/div[2]').text.split(' ')[0].replace(',',''))
time.sleep(3)
print(total_reviews)
for i in range(0, min(total_reviews, 500) ):
driver.execute_script('arguments[0].scrollTop = arguments[0].scrollHeight',scrollable_div)
time.sleep(1.5)
response = BeautifulSoup(driver.page_source, 'html.parser')
reviews = response.find_all('span', class_='wiI7pd')
restaurant__reviews = []
for review in reviews:
restaurant__reviews.append(review.text)
print(restaurant__reviews)
driver.close()
return f'Success!'
else:
driver.close()
return f'Not run'
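For completeness, the cache-path part of the original question can also be addressed directly: the Cloud Functions filesystem is read-only except for /tmp, so the driver cache has to live there rather than under a relative path like .\\temp\\Drivers. A minimal sketch, assuming the webdriver_manager release used in the question, where ChromeDriverManager still accepts the path and cache_valid_range arguments:
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service as ChromeService
from webdriver_manager.chrome import ChromeDriverManager

# /tmp is the only writable location in the Cloud Functions runtime,
# so download and cache the chromedriver binary there.
driver_path = ChromeDriverManager(path="/tmp/drivers", cache_valid_range=1).install()

opts = Options()
opts.headless = True
driver = webdriver.Chrome(service=ChromeService(driver_path), options=opts)
Note this only fixes the writable-cache part of the problem; a Chrome/Chromium binary still has to be available in the runtime for Selenium to start.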
| How to change the webdriver_manager path to a custom path in the Cloud Functions environment | I'm trying to create a headless web scraper on Cloud Functions. I have used Selenium to automate the driver provided by webdriver_manager.
Can you please tell me how to change the webdriver_manager cache path for this environment? Below is my code and the error I'm getting.
import os
import logging
# selenium 4
os.environ['GH_TOKEN'] = "gkjkjhjkhjhkjhuihjhgjhg"
os.environ['WDM_LOG'] = str(logging.NOTSET)
os.environ['WDM_LOCAL'] = '1'
os.environ['WDM_SSL_VERIFY'] = '0'
from selenium import webdriver
from selenium.webdriver.chrome.service import Service as ChromeService
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
def hello_world(request):
"""Responds to any HTTP request.
Args:
request (flask.Request): HTTP request object.
Returns:
The response text or any set of values that can be turned into a
Response object using
`make_response <http://flask.pocoo.org/docs/1.0/api/#flask.Flask.make_response>`.
"""
# instance of Options class allows
# us to configure Headless Chrome
options = Options()
print("options")
options.headless = True
driver = webdriver.Chrome(service=ChromeService(ChromeDriverManager("2.26", cache_valid_range=1, path = r".\\temp\\Drivers").install()
), options=options)
print("driver was initiated")
# this parameter tells Chrome that
# it should be run without UI (Headless)
# initializing webdriver for Chrome with our options
# driver = webdriver.Chrome(options=options)
# driver = webdriver.Chrome(ChromeDriverManager(path = r"/temp/data").install())
request_json = request.get_json()
if request_json and 'url' in request_json:
url = request_json['url']
driver.get('https://www.geeksforgeeks.org')
print(driver.title)
driver.close()
return f'Success!'
else:
return f'Not run'
Error logs -
Traceback (most recent call last): File "/layers/google.python.pip/pip/lib/python3.10/site-packages/flask/app.py", line 2525, in wsgi_app response = self.full_dispatch_request() File "/layers/google.python.pip/pip/lib/python3.10/site-packages/flask/app.py", line 1822, in full_dispatch_request rv = self.handle_user_exception(e) File "/layers/google.python.pip/pip/lib/python3.10/site-packages/flask/app.py", line 1820, in full_dispatch_request rv = self.dispatch_request() File "/layers/google.python.pip/pip/lib/python3.10/site-packages/flask/app.py", line 1796, in dispatch_request return self.ensure_sync(self.view_functions[rule.endpoint])(**view_args) File "/layers/google.python.pip/pip/lib/python3.10/site-packages/functions_framework/__init__.py", line 98, in view_func return function(request._get_current_object()) File "/workspace/main.py", line 28, in hello_world driver = webdriver.Chrome(service=ChromeService(ChromeDriverManager("2.26", cache_valid_range=1, path = r".\\temp\\Drivers").install() File "/layers/google.python.pip/pip/lib/python3.10/site-packages/webdriver_manager/chrome.py", line 39, in install driver_path = self._get_driver_path(self.driver) File "/layers/google.python.pip/pip/lib/python3.10/site-packages/webdriver_manager/core/manager.py", line 31, in _get_driver_path binary_path = self.driver_cache.save_file_to_cache(driver, file) File "/layers/google.python.pip/pip/lib/python3.10/site-packages/webdriver_manager/core/driver_cache.py", line 45, in save_file_to_cache archive = save_file(file, path) File "/layers/google.python.pip/pip/lib/python3.10/site-packages/webdriver_manager/core/utils.py", line 38, in save_file os.makedirs(directory, exist_ok=True) File "/layers/google.python.runtime/python/lib/python3.10/os.py", line 215, in makedirs makedirs(head, exist_ok=exist_ok) File "/layers/google.python.runtime/python/lib/python3.10/os.py", line 215, in makedirs makedirs(head, exist_ok=exist_ok) File "/layers/google.python.runtime/python/lib/python3.10/os.py", line 215, in makedirs makedirs(head, exist_ok=exist_ok)
I think the error is caused due to web driver manager trying to save the driver to cache is some static path, I already changed the path setting using
path = r".\\temp\\Drivers"
How to do it correctly?
| [
"So I figured this out...\nimport os\nimport logging\n# selenium 4\n\nos.environ['WDM_LOG'] = str(logging.NOTSET)\n\nfrom bs4 import BeautifulSoup\nfrom selenium.webdriver.common.by import By\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service as ChromeService\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver import Chrome\n\ndef hello_world(request):\n \"\"\"Responds to any HTTP request.\n Args:\n request (flask.Request): HTTP request object.\n Returns:\n The response text or any set of values that can be turned into a\n Response object using\n `make_response <http://flask.pocoo.org/docs/1.0/api/#flask.Flask.make_response>`.\n \"\"\"\n # instance of Options class allows\n # us to configure Headless Chrome\n \n print(\"driver was initiated\")\n \n \n # this parameter tells Chrome that\n # it should be run without UI (Headless)\n opts = Options()\n opts.add_experimental_option(\"detach\", True)\n opts.headless= True\n \n # initializing webdriver for Chrome with our options\n driver = webdriver.Chrome(service= ChromeService(ChromeDriverManager(cache_valid_range=1).install() ), options = opts)\n \n # chrome_driver_path = ChromeDriverManager().install()\n \n request_json = request.get_json()\n \n if request_json and 'url' in request_json:\n # driver = webdriver.Chrome(service= chrome_driver_path, options = opts)\n url = request_json['url']\n driver.get(url)\n driver.get(url)\n\n # driver.find_element(By.XPATH,'//*[@id=\"QA0Szd\"]/div/div/div[1]/div[2]/div/div[1]/div/div/div[2]/div[1]/div[1]/div[2]/div/div[1]/div[2]').click()\n #to make sure content is fully loaded we can use time.sleep() after navigating to each page\n\n import time\n time.sleep(3)\n\n #Find the total number of reviews\n # total_number_of_reviews = driver.find_element('xpath','//*[@id=\"QA0Szd\"]/div/div/div[1]/div[2]/div/div[1]/div/div/div[2]/div[9]').text.splitlines()[3]\n # total_number_of_reviews = driver.find_element('xpath','//*[@id=\"QA0Szd\"]/div/div/div[1]/div[2]/div/div[1]/div/div/div[2]/div[9]').text\n # time.sleep(3)\n # print(total_number_of_reviews)\n # Find scroll layout\n scrollable_div = driver.find_element('xpath','//*[@id=\"QA0Szd\"]/div/div/div[1]/div[2]/div/div[1]/div/div/div[2]')\n # time.sleep(3)\n #Scroll as many times as necessary to load all reviews\n total_reviews = int(driver.find_element('xpath', '//*[@id=\"QA0Szd\"]/div/div/div[1]/div[2]/div/div[1]/div/div/div[2]/div[2]/div/div[2]/div[2]').text.split(' ')[0].replace(',',''))\n time.sleep(3)\n print(total_reviews)\n\n for i in range(0, min(total_reviews, 500) ):\n driver.execute_script('arguments[0].scrollTop = arguments[0].scrollHeight',scrollable_div)\n time.sleep(1.5)\n\n\n\n response = BeautifulSoup(driver.page_source, 'html.parser')\n reviews = response.find_all('span', class_='wiI7pd')\n\n restaurant__reviews = []\n for review in reviews:\n restaurant__reviews.append(review.text)\n print(restaurant__reviews)\n driver.close()\n return f'Success!'\n else:\n driver.close()\n return f'Not run'\n\n"
] | [
0
] | [] | [] | [
"google_cloud_functions",
"selenium",
"selenium_chromedriver",
"selenium_webdriver",
"webdrivermanager_python"
] | stackoverflow_0074665557_google_cloud_functions_selenium_selenium_chromedriver_selenium_webdriver_webdrivermanager_python.txt |
Q:
Netsuite advanced template PDF change filename
When I download a PDF file generated from an advanced HTML template, it takes its filename from the label + internal id in the URL parameters. I would like to know whether the filename is editable, and also whether the browser title, which currently shows hotprint.nl, is editable.
A:
I found this article and tested it; it works! You may want to try it out.
https://www.entartes.com/blog/workaround-netsuites-pdf-file-naming
| Netsuite advanced template PDF change filename | When I want to download a PDF file which is generated from an advanced html template then it takes the filename of the label + internal id from the URL parameters. I would like to know if the filename is also editable and also if the browser title, which currently shows hotprint.nl, is also editable?
| [
"I found this article and tested it work! You may want to try out.\nhttps://www.entartes.com/blog/workaround-netsuites-pdf-file-naming\n"
] | [
0
] | [] | [] | [
"netsuite"
] | stackoverflow_0074559775_netsuite.txt |
Q:
How to get previous and next record order by date?
I want to get the previous and next news items ordered by date. The code below works fine if there is only one news item per day, but it cannot handle multiple news items on the same day.
NewsController.php
public function detail($slug){
$news = \App\News::active()->where('slug', $slug)->firstOrFail();
$prev_news = \App\News::whereDate('date', '>', $news->date)->active()->orderBy('date', 'desc')->first();
$next_news = \App\News::whereDate('date', '<', $news->date)->active()->orderBy('date', 'desc')->first();
}
web.php
Route::get('/news/{slug}', 'NewsController@detail')->name('news');
Thanks
A:
$prev_news = $news->whereDate('date', '>', $news->date)->active()->orderBy('date', 'desc')->get();
$next_news = $news->whereDate('date', '<', $news->date)->active()->orderBy('date', 'desc')->get();
You used ->first();
this only returns one record, so change it to get().
A:
I found a solution:
$news = \App\News::active()->where('slug', $slug)->firstOrFail();
$prev_news = \App\News::withDescription()->where(function($query) use($news){
$query->where(function($query) use($news){
$query->whereDate('date', $news->date)->where('id', '<', $news->id);
})->orWhereDate('date', '>', $news->date);
})->online()->orderBy('date', 'asc')->orderBy('id', 'desc')->active()->first();
$next_news = \App\News::withDescription()->where(function($query) use($news){
$query->where(function($query) use($news){
$query->whereDate('date', $news->date)->where('id', '>', $news->id);
})->orWhereDate('date', '<', $news->date);
})->online()->orderBy('date', 'desc')->orderBy('id', 'asc')->active()->first();
| How to get previous and next record order by date? | I want to get previous and next news order by date. The code below works fine if there is only one news a day. But cannot handle multiple news on the same day.
NewsController.php
public function detail($slug){
$news = \App\News::active()->where('slug', $slug)->firstOrFail();
$prev_news = \App\News::whereDate('date', '>', $news->date)->active()->orderBy('date', 'desc')->first();
$next_news = \App\News::whereDate('date', '<', $news->date)->active()->orderBy('date', 'desc')->first();
}
web.php
Route::get('/news/{slug}', 'NewsController@detail')->name('news');
Thanks
| [
"$prev_news = $news->whereDate('date', '>', $news->date)->active()->orderBy('date', 'desc')->get();\n\n$next_news = $news->whereDate('date', '<', $news->date)->active()->orderBy('date', 'desc')->get();\n\nyou used ->first();\nthis only returns 1 value. so change it up to get()\n",
"I found a solution:\n$news = \\App\\News::active()->where('slug', $slug)->firstOrFail();\n\n$prev_news = \\App\\News::withDescription()->where(function($query) use($news){\n $query->where(function($query) use($news){\n $query->whereDate('date', $news->date)->where('id', '<', $news->id);\n })->orWhereDate('date', '>', $news->date);\n})->online()->orderBy('date', 'asc')->orderBy('id', 'desc')->active()->first();\n\n$next_news = \\App\\News::withDescription()->where(function($query) use($news){\n $query->where(function($query) use($news){\n $query->whereDate('date', $news->date)->where('id', '>', $news->id);\n })->orWhereDate('date', '<', $news->date);\n})->online()->orderBy('date', 'desc')->orderBy('id', 'asc')->active()->first();\n\n"
] | [
1,
0
] | [] | [] | [
"eloquent",
"laravel"
] | stackoverflow_0074598544_eloquent_laravel.txt |
Q:
Does memcpy really copy the memory?
I'm experimenting with memory handling in C.
Given the following code
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
typedef unsigned char BYTE;
typedef struct Data {
int valid;
double value;
} Data;
typedef struct Message {
int id;
int size;
int nr;
Data *data;
} Message;
int main() {
int sz = 5;
int id = 1;
int i;
Message msg;
msg.id = id;
msg.size = 0;
msg.nr = sz;
msg.data = malloc(sizeof(Data) * msg.nr);
for (i = 0; i < msg.nr; i++) {
msg.data[i].valid = 1;
msg.data[i].value = (double)i;
}
printf("Input data\nid: %d\nsize: %d\nnr: %d\n",
msg.id, msg.size, msg.nr);
for (i = 0; i < sz; i++)
printf("msg.data[%d].valid: %d\nmsg.data[%d].value: %lf\n",
i, msg.data[i].valid, i, msg.data[i].value);
int bufferSize = sizeof(msg) + (sizeof(Data) * msg.nr);
msg.size = bufferSize;
printf("bufferSize: %d\n", bufferSize);
BYTE *buffer = malloc(sizeof(BYTE) * bufferSize);
memcpy(buffer, &msg, bufferSize);
if (msg.data != NULL)
free(msg.data);
// test
Message *p = (Message *)buffer;
Message rcv;
rcv.id = 0;
rcv.size = 0;
rcv.nr = 0;
rcv.data = malloc(sizeof(Data) * p->nr);
memcpy(&rcv, buffer, p->size);
printf("Output data\nid: %d\nsize: %d\nnr: %d\n",
rcv.id, rcv.size, rcv.nr);
for (i = 0; i < sz; i++)
printf("rcv.data[%d].valid: %d\nrcv.data[%d].value: %lf\n",
i, rcv.data[i].valid, i, rcv.data[i].value);
if (rcv.data != NULL)
free(rcv.data);
if (buffer != NULL)
free(buffer);
}
I'm obtaining the following error at the end of the execution of the code
*** stack smashing detected ***: terminated
Going deeper what I found is that msg.data and rcv.data point to the same memory address
memory location
and when I free the rcv.data I'm basically releasing a memory location that has been released before.
I read that memcpy should create a copy, but I had a different experience.
I don't understand why it's happening.
I use gcc as the compiler, and I tried running the code on different machines, but I always obtain the same result.
Why this behavior?
A:
Why the code contains undefined behaviors
The first call to memcpy reads starting from the address of msg for bufferSize bytes, where bufferSize is larger than the size of msg itself. Reading beyond the size of the object causes undefined behaviors.
int bufferSize = sizeof(msg) + (sizeof(Data) * msg.nr);
// This reads beyond the size of `msg`
memcpy(buffer, &msg, bufferSize);
The memcpy function copies the data byte-by-byte, including the Data* data field of Message, which is why you're getting the same address for both msg.data and rcv.data.
memcpy does not attempt to create a copy of the array pointed to by the data pointer. Copying bytes beyond the end of msg also doesn't help, because msg (which is on the stack) is not going to be adjacent with the array pointed to by msg.data (which is on the heap).
memcpy entry on cppreference
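To see the shallow copy concretely, here is a minimal, self-contained sketch (my own illustration, not the poster's program; the struct is reduced to the two fields that matter). It shows that a byte-wise copy duplicates only the pointer value, not the array it points to:
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { int nr; double *data; } Msg;

int main(void) {
    Msg a = { 3, malloc(3 * sizeof(double)) };
    Msg b;

    /* Byte-wise copy: duplicates the int and the pointer value,
       but not the heap array the pointer refers to. */
    memcpy(&b, &a, sizeof(Msg));

    assert(b.data == a.data);   /* same address: a shallow copy */
    printf("a.data=%p b.data=%p\n", (void *)a.data, (void *)b.data);

    free(a.data);
    /* free(b.data); would be a double free: undefined behavior */
    return 0;
}
This is exactly why msg.data and rcv.data print the same address in the question, and why freeing both is a double free. A real deep copy has to serialize the pointed-to array separately, as shown in the next section.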
Solution for arbitrary length array
To fix this, call memcpy two times separately to serialize the Message and the array of Data.
int bufferSize = sizeof(Message) + sizeof(Data) * msg.nr;
BYTE* buffer = malloc(sizeof(BYTE) * bufferSize);
memcpy(buffer, &msg, sizeof(Message));
memcpy(buffer + sizeof(Message), msg.data, sizeof(Data) * msg.nr);
Similarly, on the receiver end, call memcpy two times: once for the Message and once for the array of Data.
Message rcv;
memcpy(&rcv, buffer, sizeof(Message));
rcv.data = malloc(sizeof(Data) * rcv.nr);
memcpy(rcv.data, buffer + sizeof(Message), sizeof(Data) * rcv.nr);
Try it interactively on godbolt
Solution for size-limited array
If it's acceptable to have the data array to have a fixed maximum size NUM_DATA_MAX, then (de)serialization can be done with 1, instead of 2, calls to memcpy. To do so, define Message to contain an array data, instead of a pointer to an array.
#define NUM_DATA_MAX 10
typedef struct Data {
int valid;
double value;
} Data;
typedef struct Message {
int id;
int size;
int nr;
Data data[NUM_DATA_MAX];
} Message;
On the sender end
int bufferSize = sizeof(Message);
BYTE* buffer = malloc(bufferSize);
memcpy(buffer, &msg, sizeof(Message));
On the receiver end
Message rcv;
memcpy(&rcv, buffer, sizeof(Message));
| Does memcpy really copy the memory? | I'm experimenting with memory handling in C.
Given the following code
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
typedef unsigned char BYTE;
typedef struct Data {
int valid;
double value;
} Data;
typedef struct Message {
int id;
int size;
int nr;
Data *data;
} Message;
int main() {
int sz = 5;
int id = 1;
int i;
Message msg;
msg.id = id;
msg.size = 0;
msg.nr = sz;
msg.data = malloc(sizeof(Data) * msg.nr);
for (i = 0; i < msg.nr; i++) {
msg.data[i].valid = 1;
msg.data[i].value = (double)i;
}
printf("Input data\nid: %d\nsize: %d\nnr: %d\n",
msg.id, msg.size, msg.nr);
for (i = 0; i < sz; i++)
printf("msg.data[%d].valid: %d\nmsg.data[%d].value: %lf\n",
i, msg.data[i].valid, i, msg.data[i].value);
int bufferSize = sizeof(msg) + (sizeof(Data) * msg.nr);
msg.size = bufferSize;
printf("bufferSize: %d\n", bufferSize);
BYTE *buffer = malloc(sizeof(BYTE) * bufferSize);
memcpy(buffer, &msg, bufferSize);
if (msg.data != NULL)
free(msg.data);
// test
Message *p = (Message *)buffer;
Message rcv;
rcv.id = 0;
rcv.size = 0;
rcv.nr = 0;
rcv.data = malloc(sizeof(Data) * p->nr);
memcpy(&rcv, buffer, p->size);
printf("Output data\nid: %d\nsize: %d\nnr: %d\n",
rcv.id, rcv.size, rcv.nr);
for (i = 0; i < sz; i++)
printf("rcv.data[%d].valid: %d\nrcv.data[%d].value: %lf\n",
i, rcv.data[i].valid, i, rcv.data[i].value);
if (rcv.data != NULL)
free(rcv.data);
if (buffer != NULL)
free(buffer);
}
I'm obtaining the following error at the end of the execution of the code
*** stack smashing detected ***: terminated
Going deeper what I found is that msg.data and rcv.data point to the same memory address
memory location
and when I free the rcv.data I'm basically releasing a memory location that has been released before.
I read that memcpy should create a copy, but I had a different experience.
I don't understand why it's happening.
I use gcc as the compiler, and I tried running the code on different machines, but I always obtain the same result.
Why this behavior?
| [
"Why the code contains undefined behaviors\nThe first call to memcpy reads starting from the address of msg for bufferSize bytes, where bufferSize is larger than the size of msg itself. Reading beyond the size of the object causes undefined behaviors.\nint bufferSize = sizeof(msg) + (sizeof(Data) * msg.nr);\n// This reads beyond the size of `msg`\nmemcpy(buffer, &msg, bufferSize);\n\nThe memcpy function copies the data byte-by-byte, including the Data* data field of Message, which is why you're getting the same address for both msg.data and rcv.data.\nmemcpy does not attempt to create a copy of the array pointed to by the data pointer. Copying bytes beyond the end of msg also doesn't help, because msg (which is on the stack) is not going to be adjacent with the array pointed to by msg.data (which is on the heap).\nmemcpy entry on cppreference\nSolution for arbitrary length array\nTo fix this, call memcpy two times separately to serialize the Message and the array of Data.\nint bufferSize = sizeof(Message) + sizeof(Data) * msg.nr;\nBYTE* buffer = malloc(sizeof(BYTE) * bufferSize);\nmemcpy(buffer, &msg, sizeof(Message));\nmemcpy(buffer + sizeof(Message), msg.data, sizeof(Data) * msg.nr);\n\nSimilarly, on the receiver end, call memcpy two times to for the Message and the array of Data.\nMessage rcv;\nmemcpy(&rcv, buffer, sizeof(Message));\nrcv.data = malloc(sizeof(Data) * rcv.nr);\nmemcpy(rcv.data, buffer + sizeof(Message), sizeof(Data) * rcv.nr);\n\nTry it interactively on godbolt\nSolution for size-limited array\nIf it's acceptable to have the data array to have a fixed maximum size NUM_DATA_MAX, then (de)serialization can be done with 1, instead of 2, calls to memcpy. To do so, define Message to contain an array data, instead of a pointer to an array.\n#define NUM_DATA_MAX 10\n\ntypedef struct Data {\n int valid;\n double value;\n} Data;\n\ntypedef struct Message {\n int id;\n int size;\n int nr;\n Data data[NUM_DATA_MAX];\n} Message;\n\nOn the sender end\nint bufferSize = sizeof(Message);\nBYTE* buffer = malloc(bufferSize);\nmemcpy(buffer, &msg, sizeof(Message));\n\nOn the receiver end\nMessage rcv;\nmemcpy(&rcv, buffer, sizeof(Message));\n\n"
] | [
1
] | [] | [] | [
"c",
"gcc",
"memcpy"
] | stackoverflow_0074675214_c_gcc_memcpy.txt |
Q:
limiting the number of decimal places in python pandas table
I was trying to rewrite a CSV file using the pandas module in Python. I tried to multiply the first column (excluding the title) by 60 as below,
f=001.csv
Urbs_Data=pd.read_csv(f,header=None)
Urbs_Data=Urbs_Data.replace("Time_hrs","Time_min")
Urbs_Data.loc[1:,0]=Urbs_Data.loc[1:,0].astype(float)
Urbs_Data.loc[1:,0]*=60
It gives me some funny number for the first column, as
124.98000000000002,462.67
130.01999999999998,460.34
135.0,454.36
139.98000000000002,443.29
Is there any way to limit the number of decimal places for those numbers (to 2)? I tried to use the normal round function, it does not work for me.
A:
The DataFrame round method should work...
import numpy as np
import pandas as pd
some_numbers = np.random.ranf(5)
df = pd.DataFrame({'random_numbers':some_numbers})
rounded_df = df.round(decimals=2)
A:
import numpy as np
import pandas as pd
#fileName
f = "001.csv"
#Load File to Df
Urbs_Data=pd.read_csv(f,header=None)
#Round of all the numeric values to the specified decimal value
Urbs_Data= Urbs_Data.round(decimals=3)
This rounding off will be applied on all the Numeric columns
| limiting the number of decimal places in python pandas table | I was trying to rewrite a CSV file using pandas module in python. I tried to multiply the first column (excluding the title) by 60 as below,
f=001.csv
Urbs_Data=pd.read_csv(f,header=None)
Urbs_Data=Urbs_Data.replace("Time_hrs","Time_min")
Urbs_Data.loc[1:,0]=Urbs_Data.loc[1:,0].astype(float)
Urbs_Data.loc[1:,0]*=60
It gives me some funny number for the first column, as
124.98000000000002,462.67
130.01999999999998,460.34
135.0,454.36
139.98000000000002,443.29
Is there any way to limit the number of decimal places for those numbers (to 2)? I tried to use the normal round function, it does not work for me.
| [
"The DataFrame round method should work...\nimport numpy as np\nimport pandas as pd \n\nsome_numbers = np.random.ranf(5)\n\ndf = pd.DataFrame({'random_numbers':some_numbers})\n\nrounded_df = df.round(decimals=2)\n\n",
"import numpy as np\nimport pandas as pd \n\n#fileName\nf=001.csv\n\n#Load File to Df\nUrbs_Data=pd.read_csv(f,header=None)\n\n#Round of all the numeric values to the specified decimal value\nUrbs_Data= Urbs_Data.round(decimals=3)\n\nThis rounding off will be applied on all the Numeric columns\n"
] | [
31,
0
] | [] | [] | [
"dataframe",
"pandas",
"python"
] | stackoverflow_0054509060_dataframe_pandas_python.txt |
Q:
what is the difference between *p and p[1]?
Here is a code for dynamic memory allocation using malloc
void main()
{
int *p,n=5;
p=(int*)malloc(5*sizeof(int));
p[0]=10;
// or *p=10;
p[1]=20;
// or *(p+1)=20;
}
As per my knowledge, p is a pointer variable that points to the base address of the allocated memory. If I don't use the * operator, then I can't access the contents pointed to by p. But the statements p[1]=20 and *(p+1)=20
work the same. Why are they the same, and what is the use of * if we can simply write p[1]?
But then does it also mean that when I use malloc, the array allocated to the process has the same name as the pointer used to point to its base address?
A:
*p and p[1] are both dereferences of the same pointer: *p is the first element of the allocated block (exactly the same object as p[0]), and p[1] is the second element (exactly the same object as *(p+1)). The C standard defines p[i] as *(p + i), so the two notations are interchangeable; the [] form is simply more convenient for indexing, while the * form makes the pointer arithmetic explicit. Neither form gives you the entire array by itself; each expression accesses a single int.
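Here is a minimal standalone example (my own illustration, not part of the question's program) showing that the two notations designate the same objects:
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    int *p = malloc(5 * sizeof *p);
    if (p == NULL) return 1;

    *p = 10;        /* same element as p[0], i.e. *(p + 0) */
    *(p + 1) = 20;  /* same element as p[1] */

    /* p[i] is defined as *(p + i), so these print identical values. */
    printf("p[0]=%d *(p+0)=%d\n", p[0], *(p + 0));
    printf("p[1]=%d *(p+1)=%d\n", p[1], *(p + 1));

    free(p);
    return 0;
}
And no, malloc does not give the allocated array a name; p is just a variable holding the address of the first element, and any pointer holding that address can reach the elements through either notation.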
| what is the difference between *p and p[1]? | Here is a code for dynamic memory allocation using malloc
void main()
{
int *p,n=5;
p=(int*)malloc(5*sizeof(int));
p[0]=10;
// or *p=10;
p[1]=20;
// or *(p+1)=20;
}
As per my knowledge, p is a pointer variable that points to the base address of the allocated memory. If I don't use the * operator, then I can't access the contents pointed to by p. But the statements p[1]=20 and *(p+1)=20
work the same. Why are they the same, and what is the use of * if we can simply write p[1]?
But then does it also mean that when I use malloc, the array allocated to the process has the same name as the pointer used to point to its base address?
| [
"The difference between *p and p[1] is that *p is a pointer that points to the first element of the array, while p[1] is the second element of the array. The difference is that *p gives you access to the entire array, while p[1] only gives you access to the single element.\n"
] | [
0
] | [] | [] | [
"c",
"malloc",
"pointers"
] | stackoverflow_0074675249_c_malloc_pointers.txt |
Q:
how to add background image in a webpage
Which tag should we use to add a background image, and how do we make it stretch to the full page size?
I tried using a style tag in the head section of the HTML:
background-image:url;
A:
To add a background image to an HTML element, use the HTML style attribute with the CSS background-image property, for example style="background-image: url('image.jpg');".
A:
In HTML, you can use the image tag to load the image, but to use the image for the background, you must use the background-image CSS.
A:
To add a background image and control its size, you need CSS along these lines:
body{background-image: url(image); background-size: width height;}
Replace width and height with actual values (or use background-size: cover to fill the element); it will add the background image with the desired width and height.
| how to add background image in a webpage | Which tag should we use to add a background image, and how do we make it stretch to the full page size?
I tried using a style tag in the head section of the HTML:
background-image:url;
| [
"To add a background image on an HTML element, use the HTML style attribute and the CSS background-image property:\n",
"In HTML, you can use the image tag to load the image, but to use the image for the background, you must use the background-image CSS.\n",
"to add a background image and increase its size you need to use the following CSS\nbody{background-image: url(image); background-size: width height;}\nit will add background image with desired width and heigth\n"
] | [
0,
0,
0
] | [] | [] | [
"head",
"html",
"styles"
] | stackoverflow_0074675135_head_html_styles.txt |
Q:
How to implement a smooth clamp function in python?
The clamp function is clamp(x, min, max) = min if x < min, max if x > max, else x
I need a function that behaves like the clamp function, but is smooth (i.e. has a continuous derivative).
A:
What you are looking for is something like the Smoothstep function, which has a free parameter N, giving the "smoothness", i.e. how many derivatives should be continuous. It is defined as such:
This is used in several libraries and can be implemented in numpy as
import numpy as np
from scipy.special import comb
def smoothstep(x, x_min=0, x_max=1, N=1):
x = np.clip((x - x_min) / (x_max - x_min), 0, 1)
result = 0
for n in range(0, N + 1):
result += comb(N + n, n) * comb(2 * N + 1, N - n) * (-x) ** n
result *= x ** (N + 1)
return result
It reduces to the regular clamp function given N=0 (0 times differentiable), and gives increasing smoothness as you increase N. You can visualize it like this:
import matplotlib.pyplot as plt
x = np.linspace(-0.5, 1.5, 1000)
for N in range(0, 5):
y = smoothstep(x, N=N)
plt.plot(x, y, label=str(N))
plt.legend()
which gives this result:
A:
Normal clamp:
np.clip(x, mi, mx)
Smoothclamp (guaranteed to agree with normal clamp for x < min and x > max):
def smoothclamp(x, mi, mx): return mi + (mx-mi)*(lambda t: np.where(t < 0 , 0, np.where( t <= 1 , 3*t**2-2*t**3, 1 ) ) )( (x-mi)/(mx-mi) )
Sigmoid (Approximates clamp, never smaller than min, never larger than max)
def sigmoid(x,mi, mx): return mi + (mx-mi)*(lambda t: (1+200**(-t+0.5))**(-1) )( (x-mi)/(mx-mi) )
For some purposes Sigmoid will be better than Smoothclamp because Sigmoid is an invertible function - no information is lost.
For other purposes, you may need to be certain that f(x) = xmax for all x > xmax - in that case Smoothclamp is better. Also, as mentioned in another answer, there is a whole family of Smoothclamp functions, though the one given here is adequate for my purposes (no special properties other than a smooth derivative needed)
Plot them:
import numpy as np
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
x = np.linspace(-4,7,1000)
ax.plot(x, np.clip(x, -1, 4),'k-', lw=2, alpha=0.8, label='clamp')
ax.plot(x, smoothclamp(x, -1, 4),'g-', lw=3, alpha=0.5, label='smoothclamp')
ax.plot(x, sigmoid(x, -1, 4),'b-', lw=3, alpha=0.5, label='sigmoid')
plt.legend(loc='upper left')
plt.show()
Also of potential use is the arithmetic mean of these two:
def clampoid(x, mi, mx): return mi + (mx-mi)*(lambda t: 0.5*(1+200**(-t+0.5))**(-1) + 0.5*np.where(t < 0 , 0, np.where( t <= 1 , 3*t**2-2*t**3, 1 ) ) )( (x-mi)/(mx-mi) )
A:
As an option, if you want to make sure that there is a correspondence with the clamp function, you can convolve the normal clamp function with a smooth bell-like function such as Lorentzian or Gaussian.
This will guarantee the correspondence between the normal clamp function and its smoothed version. The smoothness itself will be defined by the underlying smooth function you choose to use in the convolution.
| How to implement a smooth clamp function in python? | The clamp function is clamp(x, min, max) = min if x < min, max if x > max, else x
I need a function that behaves like the clamp function, but is smooth (i.e. has a continuous derivative).
| [
"What you are looking for is something like the Smoothstep function, which has a free parameter N, giving the \"smoothness\", i.e. how many derivatives should be continuous. It is defined as such:\n\nThis is used in several libraries and can be implemented in numpy as\nimport numpy as np\nfrom scipy.special import comb\n\ndef smoothstep(x, x_min=0, x_max=1, N=1):\n x = np.clip((x - x_min) / (x_max - x_min), 0, 1)\n\n result = 0\n for n in range(0, N + 1):\n result += comb(N + n, n) * comb(2 * N + 1, N - n) * (-x) ** n\n\n result *= x ** (N + 1)\n\n return result\n\nIt reduces to the regular clamp function given N=0 (0 times differentiable), and gives increasing smoothness as you increase N. You can visualize it like this:\nimport matplotlib.pyplot as plt\n\nx = np.linspace(-0.5, 1.5, 1000)\n\nfor N in range(0, 5):\n y = smoothstep(x, N=N)\n plt.plot(x, y, label=str(N))\n\nplt.legend()\n\nwhich gives this result:\n\n",
"Normal clamp:\nnp.clip(x, mi, mx)\n\nSmoothclamp (guaranteed to agree with normal clamp for x < min and x > max):\ndef smoothclamp(x, mi, mx): return mi + (mx-mi)*(lambda t: np.where(t < 0 , 0, np.where( t <= 1 , 3*t**2-2*t**3, 1 ) ) )( (x-mi)/(mx-mi) )\n\nSigmoid (Approximates clamp, never smaller than min, never larger than max)\ndef sigmoid(x,mi, mx): return mi + (mx-mi)*(lambda t: (1+200**(-t+0.5))**(-1) )( (x-mi)/(mx-mi) )\n\nFor some purposes Sigmoid will be better than Smoothclamp because Sigmoid is an invertible function - no information is lost. \nFor other purposes, you may need to be certain that f(x) = xmax for all x > xmax - in that case Smoothclamp is better. Also, as mentioned in another answer, there is a whole family of Smoothclamp functions, though the one given here is adequate for my purposes (no special properties other than a smooth derivative needed)\nPlot them:\nimport numpy as np\nimport matplotlib.pyplot as plt\nfig, ax = plt.subplots(1, 1)\nx = np.linspace(-4,7,1000)\nax.plot(x, np.clip(x, -1, 4),'k-', lw=2, alpha=0.8, label='clamp')\nax.plot(x, smoothclamp(x, -1, 4),'g-', lw=3, alpha=0.5, label='smoothclamp')\nax.plot(x, sigmoid(x, -1, 4),'b-', lw=3, alpha=0.5, label='sigmoid')\nplt.legend(loc='upper left')\nplt.show()\n\n\nAlso of potential use is the arithmetic mean of these two: \ndef clampoid(x, mi, mx): return mi + (mx-mi)*(lambda t: 0.5*(1+200**(-t+0.5))**(-1) + 0.5*np.where(t < 0 , 0, np.where( t <= 1 , 3*t**2-2*t**3, 1 ) ) )( (x-mi)/(mx-mi) )\n\n",
"As an option, if you want to make sure that there is a correspondence with the clamp function, you can convolve the normal clamp function with a smooth bell-like function such as Lorentzian or Gaussian.\nThis will guarantee the correspondence between the normal clamp function and its smoothed version. The smoothness itself will be defined by the underlying smooth function you choose to use in the convolution.\n"
] | [
12,
10,
1
] | [] | [] | [
"clamp",
"numpy",
"pandas",
"python",
"smoothstep"
] | stackoverflow_0045165452_clamp_numpy_pandas_python_smoothstep.txt |
Q:
Why does Spring Boot ignore my CustomErrorController?
I have a custom ErrorController like this:
@Controller
public class CustomErrorController implements ErrorController {
@RequestMapping("/error42")
public String handleError(HttpServletRequest request) {
Object status = request.getAttribute(RequestDispatcher.ERROR_STATUS_CODE);
System.err.println(status);
if (Objects.isNull(status)) return "error";
int statusCode = Integer.parseInt(status.toString());
String view = switch (statusCode) {
case 403 -> "errors/403";
case 404 -> "errors/404";
case 500 -> "errors/500";
default -> "error";
};
return view;
}
}
And then I've set the server.error.path property like this:
server.error.path=/error42
So far, so good. Everything works fine. All the errors go through my CustomErrorController.
But when I set the error path to server.error.path=/error - and of course I change the request mapping annotation to @RequestMapping("/error") - this won't work anymore.
Spring Boot now completely ignores my CustomErrorController. I know, I've set the path to the one Spring Boot usually defines as standard, but is there no way to override this?
Many thanks for any information clearing up this weird behavior.
A:
I found the error, and it was solely my own fault. Especially at the beginning of a Spring Boot career, the configuration options quickly become overwhelming and one can easily lose sight of an adjustment made earlier, so I would still like to leave this question up and answer it myself.
The culprit was a self-configured view that i did weeks ago and completely lost track of:
@Configuration
@EnableWebMvc
public class WebMvcConfig implements WebMvcConfigurer {
/* FYI: will map URIs to views without the need of a Controller */
@Override
public void addViewControllers(ViewControllerRegistry registry) {
registry.addViewController("/login")
.setViewName("/login");
registry.addViewController("/error") // <--- Take this out !!!
.setViewName("/error");
registry.setOrder(Ordered.HIGHEST_PRECEDENCE);
}
}
May this help others facing the same mystery of why, once again, nothing runs quite as desired...
| Why does Spring Boot ignore my CustomErrorController? | I have a custom ErrorController like this:
@Controller
public class CustomErrorController implements ErrorController {
@RequestMapping("/error42")
public String handleError(HttpServletRequest request) {
Object status = request.getAttribute(RequestDispatcher.ERROR_STATUS_CODE);
System.err.println(status);
if (Objects.isNull(status)) return "error";
int statusCode = Integer.parseInt(status.toString());
String view = switch (statusCode) {
case 403 -> "errors/403";
case 404 -> "errors/404";
case 500 -> "errors/500";
default -> "error";
};
return view;
}
}
And then I've set the server.error.path property like this:
server.error.path=/error42
So far, so good. Everything works fine. All the errors go through my CustomErrorController.
But when I set the error path to server.error.path=/error - and of course I change the request mapping annotation to @RequestMapping("/error") - this won't work anymore.
Spring Boot now completely ignores my CustomErrorController. I know, I've set the path to the one Spring Boot usually defines as standard, but is there no way to override this?
Many thanks for any information clearing up this weird behavior.
| [
"I found the error, and it was solely my own fault. Since especially at the beginning of a Spring Boot career, the setting options quickly become overhelming, and one can lose sight of one or the other adjustment made, I would still like to leave this question and answer it myself.\nThe culprit was a self-configured view that i did weeks ago and completely lost track of:\n@Configuration\n@EnableWebMvc\npublic class WebMvcConfig implements WebMvcConfigurer {\n\n /* FYI: will map URIs to views without the need of a Controller */\n @Override\n public void addViewControllers(ViewControllerRegistry registry) {\n registry.addViewController(\"/login\")\n .setViewName(\"/login\");\n\n registry.addViewController(\"/error\") // <--- Take this out !!!\n .setViewName(\"/error\");\n\n registry.setOrder(Ordered.HIGHEST_PRECEDENCE);\n }\n}\n\nMay this help others facing the same mystery, why once again nothing runs quite as desired...\n"
] | [
0
] | [] | [] | [
"error_handling",
"spring",
"spring_boot"
] | stackoverflow_0074674975_error_handling_spring_spring_boot.txt |
Q:
How to do Animation to line renderer in Unity C#
Is there any way to add animation to this line renderer? I mean I want to draw a line from Point1 to Point3, and the line should fill in gradually, like a progress bar. How can I do that in the script below?
public class DrawLineRenderer : MonoBehaviour
{
public Transform Point1;
public Transform Point2;
public Transform Point3;
public LineRenderer linerenderer;
public float vertexCount = 12;
public float Point2Ypositio = 2;
// Start is called before the first frame update
void Start()
{
linerenderer.SetWidth(10, 10);
}
// Update is called once per frame
void Update()
{
}
public void buttonPress()
{
Point2.transform.position = new Vector3((Point1.transform.position.x + Point3.transform.position.x)/2, Point2Ypositio, (Point1.transform.position.z + Point3.transform.position.z) /2);
var pointList = new List<Vector3>();
for(float ratio = 0;ratio<=1;ratio+= 1/vertexCount)
{
var tangent1 = Vector3.Lerp(Point1.position, Point2.position, ratio);
var tangent2 = Vector3.Lerp(Point2.position, Point3.position, ratio);
var curve = Vector3.Lerp(tangent1, tangent2, ratio);
pointList.Add(curve);
}
linerenderer.positionCount = pointList.Count;
linerenderer.SetPositions(pointList.ToArray());
}
}
A:
To animate a line renderer using linear interpolation, you can use the Lerp() method to interpolate between the points of the line.
Here is an example of how you might achieve that:
#region Serialized Fields
[SerializeField] private LineRenderer lineRenderer;
[SerializeField] private Transform[] positions;
[SerializeField] [Range(1, 4)] private float animationSpeed = 2f;
#endregion
private int _currentPosition;
private float _interpolation;
private bool _isAnimating;
#region Event Functions
private void Update()
{
if (!_isAnimating) return;
var startPoint = positions[_currentPosition].position;
var endPoint = positions[_currentPosition + 1].position;
var currentPoint = Vector3.Lerp(startPoint, endPoint, _interpolation);
lineRenderer.positionCount = _currentPosition + 2;
lineRenderer.SetPosition(_currentPosition, startPoint);
lineRenderer.SetPosition(_currentPosition + 1, currentPoint);
// Increment interpolation value over time
_interpolation += Time.deltaTime * animationSpeed;
// If the end of the line has been reached, move to the next position
if (_interpolation >= 1.0f)
{
_interpolation = 0.0f;
_currentPosition++;
// Stop animating if all positions have been visited
if (_currentPosition >= positions.Length - 1)
_isAnimating = false;
}
}
#endregion
/// <summary>
/// Resets then starts the animation of the line renderer
/// </summary>
public void ButtonPress()
{
_isAnimating = true;
_interpolation = 0;
_currentPosition = 0;
}
This script sets the position of the line renderer using Lerp() in the Update() loop and starts the animation by setting the _isAnimating flag to true when the ButtonPress() function is called. This will cause the line to animate from the starting to the ending point each time the button is pressed.
| How to do Animation to line renderer in Unity C# | Is there any way to add animation to this line renderer? I mean I want to draw a line from Point1 to Point3, and the line should fill in gradually, like a progress bar. How can I do that in the script below?
public class DrawLineRenderer : MonoBehaviour
{
public Transform Point1;
public Transform Point2;
public Transform Point3;
public LineRenderer linerenderer;
public float vertexCount = 12;
public float Point2Ypositio = 2;
// Start is called before the first frame update
void Start()
{
linerenderer.SetWidth(10, 10);
}
// Update is called once per frame
void Update()
{
}
public void buttonPress()
{
Point2.transform.position = new Vector3((Point1.transform.position.x + Point3.transform.position.x)/2, Point2Ypositio, (Point1.transform.position.z + Point3.transform.position.z) /2);
var pointList = new List<Vector3>();
for(float ratio = 0;ratio<=1;ratio+= 1/vertexCount)
{
var tangent1 = Vector3.Lerp(Point1.position, Point2.position, ratio);
var tangent2 = Vector3.Lerp(Point2.position, Point3.position, ratio);
var curve = Vector3.Lerp(tangent1, tangent2, ratio);
pointList.Add(curve);
}
linerenderer.positionCount = pointList.Count;
linerenderer.SetPositions(pointList.ToArray());
}
}
| [
"To animate a line renderer using linear interpolation, you can use the Lerp() method to interpolate between the points of the line.\nHere is an example of how you might achieve that:\n#region Serialized Fields\n\n[SerializeField] private LineRenderer lineRenderer;\n[SerializeField] private Transform[] positions;\n[SerializeField] [Range(1, 4)] private float animationSpeed = 2f;\n\n#endregion\n\nprivate int _currentPosition;\nprivate float _interpolation;\nprivate bool _isAnimating;\n\n#region Event Functions\n\nprivate void Update()\n{\n if (!_isAnimating) return;\n\n var startPoint = positions[_currentPosition].position;\n var endPoint = positions[_currentPosition + 1].position;\n var currentPoint = Vector3.Lerp(startPoint, endPoint, _interpolation);\n\n lineRenderer.positionCount = _currentPosition + 2;\n lineRenderer.SetPosition(_currentPosition, startPoint);\n lineRenderer.SetPosition(_currentPosition + 1, currentPoint);\n\n // Increment interpolation value over time\n _interpolation += Time.deltaTime * animationSpeed;\n\n // If the end of the line has been reached, move to the next position\n if (_interpolation >= 1.0f)\n {\n _interpolation = 0.0f;\n _currentPosition++;\n\n // Stop animating if all positions have been visited\n if (_currentPosition >= positions.Length - 1)\n _isAnimating = false;\n }\n}\n\n#endregion\n\n/// <summary>\n/// Resets then starts the animation of the line renderer\n/// </summary>\npublic void ButtonPress()\n{\n _isAnimating = true;\n _interpolation = 0;\n _currentPosition = 0;\n}\n\nThis script sets the position of the line renderer using Lerp() in the Update() loop and starts the animation by setting the _isAnimating flag to true when the ButtonPress() function is called. This will cause the line to animate from the starting to the ending point each time the button is pressed.\n\n"
] | [
0
] | [] | [] | [
"animation",
"c#",
"line",
"rendering",
"unity3d"
] | stackoverflow_0074672251_animation_c#_line_rendering_unity3d.txt |
Q:
When is writer-preferred reader-writer lock used?
I know there are many reader-preferred rwlocks, and there are even more aggressive designs such as RCU, which are usually considered reader-preferred.
But what if the writers are not much less than the readers? I know a writer-only rwlock will fall back to an exclusive lock, but consider following patterns
writer-most, but there are some readers, perhaps 80% W vs 20% R?
writers and readers are even, 50% W vs 50% R
readers-most, but writers can not be ignored and they have requirement on latency, 20% W vs 80% R
I have two questions
Do such cases really exist, especially the first 2 cases? I don't know any user-level applications that fit into those types. User-level examples are preferred.
If there is indeed an application that fits into type 1/2/3, is there any existing way to improve the lock performance?
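One existing mechanism that is relevant to the second question: on Linux/glibc, a pthread rwlock can be asked to prefer waiting writers via the non-portable kind attribute, which avoids writer starvation under a heavy read load. A minimal sketch (my own illustration; the _NP names are GNU extensions, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP is the variant that actually prioritizes waiting writers, and the program must be built with -pthread):
#define _GNU_SOURCE
#include <pthread.h>

static pthread_rwlock_t lock;

int main(void) {
    pthread_rwlockattr_t attr;
    pthread_rwlockattr_init(&attr);

    /* GNU extension: block new readers while a writer is waiting. */
    pthread_rwlockattr_setkind_np(&attr,
        PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);

    pthread_rwlock_init(&lock, &attr);
    pthread_rwlockattr_destroy(&attr);

    pthread_rwlock_wrlock(&lock);   /* writer critical section */
    pthread_rwlock_unlock(&lock);

    pthread_rwlock_rdlock(&lock);   /* reader critical section */
    pthread_rwlock_unlock(&lock);

    pthread_rwlock_destroy(&lock);
    return 0;
}
Whether this helps for the write-heavy 80/20 or 50/50 patterns above is workload-dependent; when writes dominate, a plain mutex is often at least as fast because it avoids the rwlock's extra bookkeeping.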
| When is writer-preferred reader-writer lock used? | I know there are many reader-preferred rwlock, and there are even more aggressive design such as RCU, which are usually considered reader-preferred.
But what if the writers are not much less than the readers? I know a writer-only rwlock will fall back to an exclusive lock, but consider following patterns
writer-most, but there are some readers, perhaps 80% W vs 20% R?
writers and readers are even, 50% W vs 50% R
readers-most, but writers can not be ignored and they have requirement on latency, 20% W vs 80% R
I have two questions
Do such cases really exist, especially the first 2 cases? I don't know any user-level applications that fit into those types. User-level examples are preferred.
If there is indeed an application that fits into type 1/2/3, is there any existing way to improve the lock performance?
| [] | [] | [
"I do not have enough knowledge to answer this question myself, but I think\nembeddedmonologue - rwlock and reader/writer starvation is worth reading.\n"
] | [
-1
] | [
"linux",
"locking",
"mutex",
"readerwriterlock",
"rwlock"
] | stackoverflow_0052224437_linux_locking_mutex_readerwriterlock_rwlock.txt |
Q:
cython report bug: expected an identifier
I am using cython to convert py file to pyd file.
My test code is:
# funcA.py
class Window:
def exec(self):
pass
pass
And the setup.py file is:
#setup.py
from setuptools import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
ext_modules = [
Extension('funcA', ['funcA.py']),
]
setup(
name='App',
ext_modules=ext_modules,
include_dirs=["App"],
cmdclass={'build_ext': build_ext},
)
After python setup.py build_ext --inplace, a bug is reported:
Error compiling Cython file:
------------------------------------------------------------
...
class Window:
def exec(self):
^
------------------------------------------------------------
funcA.py:4:8: Expected an identifier
building 'funcA' extension
"C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\bin\HostX86\x64\cl.exe" /c /nologo /O2 /W3 /GL /DNDEBUG /MD -IApp -ID:\Anaconda3\envs\work\include -ID:\Anaconda3\envs\work\Includ
e "-IC:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\ATLMFC\include" "-IC:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\include" "-IC:\Prog
ram Files (x86)\Windows Kits\10\include\10.0.19041.0\ucrt" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.19041.0\shared" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.19041.0\um" "-IC:\Program Files (x
86)\Windows Kits\10\include\10.0.19041.0\winrt" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.19041.0\cppwinrt" /TcfuncA.c /Fobuild\temp.win-amd64-cpython-39\Release\funcA.obj
funcA.c
funcA.c(1): fatal error C1189: #error: Do not use this file, it is the result of a failed Cython compilation.
error: command 'C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Community\\VC\\Tools\\MSVC\\14.29.30133\\bin\\HostX86\\x64\\cl.exe' failed with exit code 2
But, if I replace the funcA.py to:
#new funcA.py
class Window:
pass
Everything is OK.
Why would the exec function cause this bug?
Any suggestion is appreciated~~~
------------- update --------------------------------
Thanks to @DavidW's suggestion.
The solution is adding #cython: language_level=3 at the top of the script.
A:
Depending on what Cython version you're using (you don't say...) Cython defaults to Python 2 behaviour. In Python 2 exec was a keyword, so cannot be used as a function name.
Either:
pick a different function name,
put Cython in Python 3-like mode by setting language_level to 3. Bear in mind this may change some other things (e.g. print, the scope of list comprehensions, ...)
Use the Cython 3 alpha version, which defaults to Python 3 semantics.
| cython report bug: expected an identifier | I am using cython to convert py file to pyd file.
My test code is:
# funcA.py
class Window:
def exec(self):
pass
pass
And the setup.py file is:
#setup.py
from setuptools import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
ext_modules = [
Extension('funcA', ['funcA.py']),
]
setup(
name='App',
ext_modules=ext_modules,
include_dirs=["App"],
cmdclass={'build_ext': build_ext},
)
After python setup.py build_ext --inplace, a bug is reported:
Error compiling Cython file:
------------------------------------------------------------
...
class Window:
def exec(self):
^
------------------------------------------------------------
funcA.py:4:8: Expected an identifier
building 'funcA' extension
"C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\bin\HostX86\x64\cl.exe" /c /nologo /O2 /W3 /GL /DNDEBUG /MD -IApp -ID:\Anaconda3\envs\work\include -ID:\Anaconda3\envs\work\Includ
e "-IC:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\ATLMFC\include" "-IC:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\include" "-IC:\Prog
ram Files (x86)\Windows Kits\10\include\10.0.19041.0\ucrt" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.19041.0\shared" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.19041.0\um" "-IC:\Program Files (x
86)\Windows Kits\10\include\10.0.19041.0\winrt" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.19041.0\cppwinrt" /TcfuncA.c /Fobuild\temp.win-amd64-cpython-39\Release\funcA.obj
funcA.c
funcA.c(1): fatal error C1189: #error: Do not use this file, it is the result of a failed Cython compilation.
error: command 'C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Community\\VC\\Tools\\MSVC\\14.29.30133\\bin\\HostX86\\x64\\cl.exe' failed with exit code 2
But, if I replace the funcA.py to:
#new funcA.py
class Window:
pass
Everything is OK.
Why would the exec function cause this bug?
Any suggestion is appreciated~~~
------------- update --------------------------------
Thanks to @DavidW's suggestion.
The solution is adding #cython: language_level=3 at the top of the script.
| [
"Depending on what Cython version you're using (you don't say...) Cython defaults to Python 2 behaviour. In Python 2 exec was a keyword, so cannot be used as a function name.\nEither:\n\npick a different function name,\nput Cython in Python 3-like mode by setting language_level to 3. Bear in mind this may change some other things (e.g. print, the scope of list comprehensions, ...)\nUse Cython 3 alpha version where it does default to Python 3 semantics by default.\n\n"
] | [
1
] | [] | [] | [
"cython"
] | stackoverflow_0074674120_cython.txt |
Q:
Docker container exit with error code error libcurl not found
I am building a container; it's for a Rust app deployment on Argonaut, but it's not able to start. Here you can see the Dockerfile.
FROM rust:1.64.0-buster AS builder
WORKDIR /app
ARG TOKEN
ARG DATABASE_URL
RUN git config --global url."https://${TOKEN}:@github.com/".insteadOf "https://github.com/"
COPY . .
ENV CARGO_NET_GIT_FETCH_WITH_CLI true
RUN rustup component add rustfmt
RUN apt-get update -y && apt-get install git wget ca-certificates curl gnupg lsb-release cmake libcurl4 -y
RUN cargo build
FROM debian:buster-slim
WORKDIR /app
COPY --from=builder /app/target/debug/linkedin /app/target/release/linkedin
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
CMD ["/app/target/release/linkedin"]
EXPOSE 3000
It builds successfully, but when it runs it exits with error code 127.
linkedin-leadr-1 | /app/target/release/linkedin: error while loading shared libraries: libcurl.so.4: cannot open shared object file: No such file or directory
I have not found what's wrong with it: even though I am installing libcurl4, my Docker container is not able to find it. Can you please give me the solution?
A:
As you install libcurl4 in your build environment but not in your execution environment, that's most likely the reason.
There are two ways to solve this:
Install libcurl4 in your final image, or
Link statically by replacing cargo build with
RUN rustup target add x86_64-unknown-linux-musl
RUN cargo build --target=x86_64-unknown-linux-musl --release
The --release flag should get added either way, as I'm sure you don't want to deliver unoptimized debug builds to your enduser ;)
Note that if you choose to install libcurl4 in your final image, you need to clean up the apt cache afterwards, otherwise your image grows immensely:
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install --yes \
libcurl4 \
&& apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
The full Dockerfile with libcurl4 installed would then look like this:
FROM rust:1.64.0-buster AS builder
WORKDIR /app
ARG TOKEN
ARG DATABASE_URL
RUN git config --global url."https://${TOKEN}:@github.com/".insteadOf "https://github.com/"
COPY . .
ENV CARGO_NET_GIT_FETCH_WITH_CLI true
RUN rustup component add rustfmt
RUN apt-get update -y && apt-get install git wget ca-certificates curl gnupg lsb-release cmake libcurl4 -y
RUN cargo build
# Copy the libcurl shared library from the builder stage into the final container
RUN mkdir -p /usr/local/lib && \
cp /usr/lib/x86_64-linux-gnu/libcurl.so.4 /usr/local/lib && \
ln -s /usr/local/lib/libcurl.so.4 /usr/local/lib/libcurl.so
FROM debian:buster-slim
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install --yes \
libcurl4 \
&& apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
WORKDIR /app
COPY --from=builder /app/target/debug/linkedin /app/target/release/linkedin
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
CMD ["/app/target/release/linkedin"]
EXPOSE 3000
A:
It looks like your Dockerfile is missing a step to copy the libcurl shared library into your container. This is why you are getting the "error while loading shared libraries" message when you try to run the container.
To fix this, you can add a step to your Dockerfile to copy the libcurl shared library from the builder stage into the final container. Here's an example of how you might do this:
FROM rust:1.64.0-buster AS builder
WORKDIR /app
ARG TOKEN
ARG DATABASE_URL
RUN git config --global url."https://${TOKEN}:@github.com/".insteadOf "https://github.com/"
COPY . .
ENV CARGO_NET_GIT_FETCH_WITH_CLI true
RUN rustup component add rustfmt
RUN apt-get update -y && apt-get install git wget ca-certificates curl gnupg lsb-release cmake libcurl4 -y
RUN cargo build
# Copy the libcurl shared library from the builder stage into the final container
RUN mkdir -p /usr/local/lib && \
cp /usr/lib/x86_64-linux-gnu/libcurl.so.4 /usr/local/lib && \
ln -s /usr/local/lib/libcurl.so.4 /usr/local/lib/libcurl.so
FROM debian:buster-slim
WORKDIR /app
COPY --from=builder /app/target/debug/linkedin /app/target/release/linkedin
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
CMD ["/app/target/release/linkedin"]
EXPOSE 3000
You will need to adjust the path to the libcurl shared library based on your system and the version of libcurl that you are using. The example above assumes that you are running on a 64-bit Linux system and using libcurl version 4.
After making this change and rebuilding your container, you should be able to run it without encountering the "error while loading shared libraries" message.
| Docker container exit with error code error libcurl not found | I am building a container, you can see the docker file, its for rust app deployment on Argonaut. but its not able to start. Here you can see the Dockerfile.
FROM rust:1.64.0-buster AS builder
WORKDIR /app
ARG TOKEN
ARG DATABASE_URL
RUN git config --global url."https://${TOKEN}:@github.com/".insteadOf "https://github.com/"
COPY . .
ENV CARGO_NET_GIT_FETCH_WITH_CLI true
RUN rustup component add rustfmt
RUN apt-get update -y && apt-get install git wget ca-certificates curl gnupg lsb-release cmake libcurl4 -y
RUN cargo build
FROM debian:buster-slim
WORKDIR /app
COPY --from=builder /app/target/debug/linkedin /app/target/release/linkedin
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
CMD ["/app/target/release/linkedin"]
EXPOSE 3000
It builds successfully, but when it runs it exits with error code 127.
linkedin-leadr-1 | /app/target/release/linkedin: error while loading shared libraries: libcurl.so.4: cannot open shared object file: No such file or directory
I have not found what's wrong with it: even though I am installing libcurl4, my Docker container is not able to find it. Can you please give me the solution?
| [
"As you install libcurl4 in your build environment but not in your execution environment, that's most likely the reason.\nThere are two ways to solve this:\n\nInstall libcurl4 in your final image, or\nLink statically by replacing cargo build with\nRUN rustup target add x86_64-unknown-linux-musl\nRUN cargo build --target=x86_64-unknown-linux-musl --release\n\n\n\nThe --release flag should get added either way, as I'm sure you don't want to deliver unoptimized debug builds to your enduser ;)\nNote that if you choose to install libcurl4 in your final image, you need to clean up the apt cache afterwards, otherwise your image grows immensely:\nRUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install --yes \\\n libcurl4 \\\n && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*\n\n\nThe full Dockerfile with libcurl4 installed would then look like this:\nFROM rust:1.64.0-buster AS builder\nWORKDIR /app\n\nARG TOKEN\nARG DATABASE_URL\n\nRUN git config --global url.\"https://${TOKEN}:@github.com/\".insteadOf \"https://github.com/\"\n\nCOPY . .\n\nENV CARGO_NET_GIT_FETCH_WITH_CLI true\n\nRUN rustup component add rustfmt\nRUN apt-get update -y && apt-get install git wget ca-certificates curl gnupg lsb-release cmake libcurl4 -y\n\nRUN cargo build\n\n# Copy the libcurl shared library from the builder stage into the final container\nRUN mkdir -p /usr/local/lib && \\\n cp /usr/lib/x86_64-linux-gnu/libcurl.so.4 /usr/local/lib && \\\n ln -s /usr/local/lib/libcurl.so.4 /usr/local/lib/libcurl.so\n\n\nFROM debian:buster-slim\nRUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install --yes \\\n libcurl4 \\\n && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*\n\nWORKDIR /app\nCOPY --from=builder /app/target/debug/linkedin /app/target/release/linkedin\nCOPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/\n\nCMD [\"/app/target/release/linkedin\"]\nEXPOSE 3000\n\n",
"It looks like your Dockerfile is missing a step to copy the libcurl shared library into your container. This is why you are getting the \"error while loading shared libraries\" message when you try to run the container.\nTo fix this, you can add a step to your Dockerfile to copy the libcurl shared library from the builder stage into the final container. Here's an example of how you might do this:\nFROM rust:1.64.0-buster AS builder\nWORKDIR /app\n\nARG TOKEN\nARG DATABASE_URL\n\nRUN git config --global url.\"https://${TOKEN}:@github.com/\".insteadOf \"https://github.com/\"\n\nCOPY . .\n\nENV CARGO_NET_GIT_FETCH_WITH_CLI true\n\nRUN rustup component add rustfmt\nRUN apt-get update -y && apt-get install git wget ca-certificates curl gnupg lsb-release cmake libcurl4 -y\n\nRUN cargo build\n\n# Copy the libcurl shared library from the builder stage into the final container\nRUN mkdir -p /usr/local/lib && \\\n cp /usr/lib/x86_64-linux-gnu/libcurl.so.4 /usr/local/lib && \\\n ln -s /usr/local/lib/libcurl.so.4 /usr/local/lib/libcurl.so\n\nFROM debian:buster-slim\nWORKDIR /app\nCOPY --from=builder /app/target/debug/linkedin /app/target/release/linkedin\nCOPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/\n\nCMD [\"/app/target/release/linkedin\"]\nEXPOSE 3000\n\nYou will need to adjust the path to the libcurl shared library based on your system and the version of libcurl that you are using. The example above assumes that you are running on a 64-bit Linux system and using libcurl version 4.\nAfter making this change and rebuilding your container, you should be able to run it without encountering the \"error while loading shared libraries\" message.\n"
] | [
2,
0
] | [] | [] | [
"docker",
"rust"
] | stackoverflow_0074670887_docker_rust.txt |
Q:
htaccess - How to allow access only to specific files for specific domain
I'm trying to allow access to a specific file from specific domains using the following code, but it didn't work
<Files "file.zip">
require all denied
require host localhost
require ip 127.0.0.1
require ip xxx.yyy.zzz.aaa
</Files>
Are there better ways to do this
Thanks
A:
I mean, if there are files on my site such as pictures or videos, and I want to give specific domains permission to access specific files by name (not by file suffix) so that those domains can display the videos or images on their sites:
<Files "file.jpg">
Order Deny,Allow
Deny from all
Allow from domain1.com
Allow from domain2.com
</Files>
| htaccess - How to allow access only to specific files for specific domain | I'm trying to allow access to a specific file from specific domains using the following code, but it didn't work
<Files "file.zip">
require all denied
require host localhost
require ip 127.0.0.1
require ip xxx.yyy.zzz.aaa
</Files>
Are there better ways to do this
Thanks
| [
"I mean if there are files on my site such as pictures or videos\nAnd I want to give permission to specific domains to access specific files by name and not a file suffix in order for this domain to be able to display these videos or images on their sites\n<Files \"file.jpg\">\n Order Deny,Allow\n Deny from all\n Allow from domain1.com\n Allow from domain2.com\n</Files>\n\n"
] | [
0
] | [] | [] | [
".htaccess"
] | stackoverflow_0074661231_.htaccess.txt |
Q:
Divide area into equal parts
I read the SkiaSharp documentation. I am interested in how I can divide the surface of a shape (rectangle or polygon) into equal parts. For example, divide the surface into 6 equal parts and paint those parts with two colors according to the even-odd principle (something like a football grass field texture). I did not find any similar example in the documentation.
A:
Maku, thanks for your answer. I resolved this.
I needed something like this in the picture:
And my code for this result looks like this:
using System;
using SkiaSharp;
using System.Collections.Generic;
using System.Collections.ObjectModel;
using System.Linq;
void Draw(SKCanvas canvas, int width, int height)
{
float scale = 22.0f;
SKPath path = new SKPath();
List<SKPoint> AreaCenters = new List<SKPoint>();
List<SKPath> OutlinePaths = new List<SKPath>();
OutlinePaths.Clear();
AreaCenters.Clear();
AreaCenters.Add(new SKPoint(0.0f, 0.0f));
AreaCenters.Add(new SKPoint(200.0f, 0.0f));
AreaCenters.Add(new SKPoint(100f, 200.0f));
float scaleFactor = 1.1f;
var scaleMatrix = SKMatrix.MakeSkew(scale * 2.0f, scale * scaleFactor);
SKPaint fillColor1 = new SKPaint
{
IsAntialias = true,
Color = SKColors.Transparent,
Style = SKPaintStyle.Stroke,
StrokeWidth = 3,
StrokeCap = SKStrokeCap.Square
};
SKPaint fillColor2 = new SKPaint
{
IsAntialias = true,
Color = SKColors.DarkGreen,
Style = SKPaintStyle.StrokeAndFill,
StrokeWidth = 3,
StrokeCap = SKStrokeCap.Square
};
SKPaint lineColor = new SKPaint
{
IsAntialias = true,
Color = SKColors.Orange,
Style = SKPaintStyle.Stroke,
StrokeWidth = 4.0f
};
fillColor2.PathEffect = SKPathEffect.Create2DLine(scale, scaleMatrix);
if (AreaCenters.Count > 0)
{
path.MoveTo((AreaCenters[0]));
foreach (SKPoint p in AreaCenters.ToArray())
{
path.LineTo((p));
}
path.Close();
//path.Transform(TransformationMatrix);
//OutlinePath = path;
//this.OutlinePaths.Add(this.OutlinePath);
OutlinePaths.Add(path);
canvas.Save();
canvas.DrawPath(path, lineColor);
if (AreaCenters.Count > 2)
canvas.ClipPath(OutlinePaths[0]);
SKPath sKPath = new SKPath();
sKPath.AddPath(path, -scale, -scale * 2f);
sKPath.AddPath(path, -scale, scale * 2f);
sKPath.AddPath(path, scale, -scale * 2f);
sKPath.AddPath(path, scale, scale * 2f);
canvas.DrawPath(sKPath, fillColor1);
canvas.DrawPath(path, fillColor2);
canvas.DrawPath(path, lineColor);
fillColor1.Dispose();
fillColor2.Dispose();
canvas.Restore();
}
}
For this purposes I'm used SKMatrix.CreateSkew() (or SKMatrix.MakeSkew()) method in skiasharp.
| Divide area into equal parts | I read the SkiaSharp documentation. I am interested in how I can divide the surface of a shape (rectangle or polygon) into equal parts. For example, divide the surface into 6 equal parts and paint those parts with two colors according to the even-odd principle (something like a football grass field texture). I did not find any similar example in the documentation.
| [
"Maku, thanks for your answer. I resolved this.\nI needed something like this in the picture:\n\nAnd my code for this result looks like this:\n using System;\nusing SkiaSharp;\nusing System.Collections.Generic;\nusing System.Collections.ObjectModel;\nusing System.Linq;\n\nvoid Draw(SKCanvas canvas, int width, int height)\n{\n float scale = 22.0f;\n SKPath path = new SKPath();\n List<SKPoint> AreaCenters = new List<SKPoint>();\n List<SKPath> OutlinePaths = new List<SKPath>();\n OutlinePaths.Clear();\n AreaCenters.Clear();\n AreaCenters.Add(new SKPoint(0.0f, 0.0f));\n AreaCenters.Add(new SKPoint(200.0f, 0.0f));\n AreaCenters.Add(new SKPoint(100f, 200.0f));\n float scaleFactor = 1.1f;\n var scaleMatrix = SKMatrix.MakeSkew(scale * 2.0f, scale * scaleFactor);\n \n SKPaint fillColor1 = new SKPaint\n {\n IsAntialias = true,\n Color = SKColors.Transparent,\n Style = SKPaintStyle.Stroke,\n StrokeWidth = 3,\n StrokeCap = SKStrokeCap.Square\n };\n\n SKPaint fillColor2 = new SKPaint\n {\n IsAntialias = true,\n Color = SKColors.DarkGreen,\n Style = SKPaintStyle.StrokeAndFill,\n StrokeWidth = 3,\n StrokeCap = SKStrokeCap.Square\n };\n\n SKPaint lineColor = new SKPaint\n {\n IsAntialias = true,\n Color = SKColors.Orange,\n Style = SKPaintStyle.Stroke,\n StrokeWidth = 4.0f\n };\n\n \n fillColor2.PathEffect = SKPathEffect.Create2DLine(scale, scaleMatrix);\n\n \n\n if (AreaCenters.Count > 0)\n {\n path.MoveTo((AreaCenters[0]));\n foreach (SKPoint p in AreaCenters.ToArray())\n {\n path.LineTo((p));\n }\n path.Close();\n\n //path.Transform(TransformationMatrix);\n //OutlinePath = path;\n //this.OutlinePaths.Add(this.OutlinePath);\n OutlinePaths.Add(path);\n canvas.Save();\n\n canvas.DrawPath(path, lineColor);\n if (AreaCenters.Count > 2)\n canvas.ClipPath(OutlinePaths[0]);\n\n SKPath sKPath = new SKPath();\n sKPath.AddPath(path, -scale, -scale * 2f);\n sKPath.AddPath(path, -scale, scale * 2f);\n sKPath.AddPath(path, scale, -scale * 2f);\n sKPath.AddPath(path, scale, scale * 2f);\n canvas.DrawPath(sKPath, fillColor1);\n canvas.DrawPath(path, fillColor2);\n canvas.DrawPath(path, lineColor);\n fillColor1.Dispose();\n fillColor2.Dispose();\n canvas.Restore();\n }\n}\n\nFor this purposes I'm used SKMatrix.CreateSkew() (or SKMatrix.MakeSkew()) method in skiasharp.\n"
] | [
0
] | [] | [] | [
"c#",
"skia",
"skiasharp",
"xamarin.forms"
] | stackoverflow_0073420563_c#_skia_skiasharp_xamarin.forms.txt |
Q:
Logging and crash stack traces not showing in Android Studio
I'm trying to debug an app on my device and I'm having a bit of trouble with the debugger. I tried testing the logger to see if it would write to Logcat like so:
Log.d("MyActivity", "Testing logging...");
But nothing shows up in Logcat with the app: com.myapp.debug filter. It comes up when I simply filter by string (using my app name) but the entry looks like this:
01-08 13:45:07.468 29748-29748/? D/MyActivity﹕ Testing logging...
Does this question mark mean that something in the app is not getting passed through to the debugger? This might relate to my second issue with the debugger:
I've been debugging a crash and every time it happens, the phone simply shows the 'App is not responding' message then closes the current activity, disconnects the debugger, and the app keeps on running with the previous activity. No stack trace, no info about the crash, nothing. Is there something I need to set up in Android Studio to get this working?
A:
I'm also having this trouble and I can't find too a good answer for this.
Instead I did a work around and catch the error with Thread.setDefaultUncaughtExceptionHandler() and Log it with Log.e()
I used this class to do it.
public class ExceptionHandler implements java.lang.Thread.UncaughtExceptionHandler {
private final String LINE_SEPARATOR = "\n";
public static final String LOG_TAG = ExceptionHandler.class.getSimpleName();
@SuppressWarnings("deprecation")
public void uncaughtException(Thread thread, Throwable exception) {
StringWriter stackTrace = new StringWriter();
exception.printStackTrace(new PrintWriter(stackTrace));
StringBuilder errorReport = new StringBuilder();
errorReport.append(stackTrace.toString());
Log.e(LOG_TAG, errorReport.toString());
android.os.Process.killProcess(android.os.Process.myPid());
System.exit(10);
}
}
Then in my Activity .
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
/**
* catch unexpected error
*/
Thread.setDefaultUncaughtExceptionHandler(new ExceptionHandler());
setContentView(R.layout.activity_main);
//other codes
}
Hope this helps.
A:
I think it is the same adb or filter problem.
At first remove all filters.
Restart adb - type in terminal adb kill-server && adb start-server.
A:
Probably your Google Analytics "ga_reportUncaughtExceptions" is set to true; turning it to false fixes the issue and exceptions get printed to Logcat. Please refer to the link below for further details.
Why does android logcat not show the stack trace for a runtime exception?
A:
You should define a class that implements UncaughtExceptionHandler and use stackTraceToString in Kotlin:
import android.util.Log
import java.lang.Thread.UncaughtExceptionHandler
class ExceptionHandler : UncaughtExceptionHandler {
override fun uncaughtException(t: Thread, e: Throwable) {
val stackTrace: String = e.stackTraceToString()
Log.d("TAG", stackTrace)
}
}
and register it in your application:
Thread.setDefaultUncaughtExceptionHandler(ExceptionHandler())
| Logging and crash stack traces not showing in Android Studio | I'm trying to debug an app on my device and I'm having a bit of trouble with the debugger. I tried testing the logger to see if it would write to Logcat like so:
Log.d("MyActivity", "Testing logging...");
But nothing shows up in Logcat with the app: com.myapp.debug filter. It comes up when I simply filter by string (using my app name) but the entry looks like this:
01-08 13:45:07.468 29748-29748/? D/MyActivity﹕ Testing logging...
Does this question mark mean that something in the app is not getting passed through to the debugger? This might relate to my second issue with the debugger:
I've been debugging a crash and every time it happens, the phone simply shows the 'App is not responding' message then closes the current activity, disconnects the debugger, and the app keeps on running with the previous activity. No stack trace, no info about the crash, nothing. Is there something I need to set up in Android Studio to get this working?
| [
"I'm also having this trouble and I can't find too a good answer for this.\nInstead I did a work around and catch the error with Thread.setDefaultUncaughtExceptionHandler() and Log it with Log.e()\nI used this class to do it.\n public class ExceptionHandler implements java.lang.Thread.UncaughtExceptionHandler {\n private final String LINE_SEPARATOR = \"\\n\";\n public static final String LOG_TAG = ExceptionHandler.class.getSimpleName();\n\n @SuppressWarnings(\"deprecation\")\n public void uncaughtException(Thread thread, Throwable exception) {\n StringWriter stackTrace = new StringWriter();\n exception.printStackTrace(new PrintWriter(stackTrace));\n\n StringBuilder errorReport = new StringBuilder();\n errorReport.append(stackTrace.toString());\n\n Log.e(LOG_TAG, errorReport.toString());\n\n android.os.Process.killProcess(android.os.Process.myPid());\n System.exit(10);\n }\n}\n\nThen in my Activity .\n @Override\n protected void onCreate(Bundle savedInstanceState) {\n super.onCreate(savedInstanceState);\n\n /**\n * catch unexpected error\n */\n Thread.setDefaultUncaughtExceptionHandler(new ExceptionHandler());\n\n setContentView(R.layout.activity_main);\n\n //other codes\n }\n\nHope this helps.\n",
"I think it is the same adb or filer problem.\nAt first remove all filters.\nRestart adb - type in terminal adb kill-server && adb start-server.\n",
"Probably your google analytics \"ga_reportUncaughtExceptions\" is set to true, turning it to false fixes the issue and exceptions get printed to logcat.Please refer to below link for further details.\nWhy does android logcat not show the stack trace for a runtime exception?\n",
"You should define a class that implement UncaughtExceptionHandler and use stackTraceToString in Kotlin:\nimport android.util.Log\nimport java.lang.Thread.UncaughtExceptionHandler\n\nclass ExceptionHandler : UncaughtExceptionHandler {\n override fun uncaughtException(t: Thread, e: Throwable) {\n val stackTrace: String = e.stackTraceToString()\n\n Log.d(\"TAG\", stackTrace)\n }\n}\n\n\nand register it in your application:\nThread.setDefaultUncaughtExceptionHandler(ExceptionHandler())\n\n"
] | [
13,
4,
2,
0
] | [] | [] | [
"android",
"android_studio",
"debugging",
"java"
] | stackoverflow_0027841856_android_android_studio_debugging_java.txt |
Q:
From where I need to download the dSYM from app store connect?
I regularly download dSYMs from the Activity tab in App Store Connect.
In that tab there is a list of builds, and from each build's detail we can download the dSYMs.
But from today, after the App Store Connect update, I have seen that there is no Activity tab.
Apple Thread related to this topic.
So, from where can I download the dSYM?
A:
I found the way to download the dSYM
Log in to App Store Connect, then click My Apps > TestFlight.
Select the build you want to download a dSYM for.
Click Build Metadata Tab > Download dSYM.
Here is a mail I received from Apple Developer Program Support:
I'm with Apple Developer Program Support. I’m following up with you
regarding your recent email.
I understand you are unable to locate the Activity tab in App Store
Connect. I can certainly look into this for you. After researching
your request, it looks like the Activity tab has merged with the
TestFlight tab in App Store Connect. You can now see your build
metadata under the TestFlight tab under your app record.
Update (17th Dec 2020):
Activity tab name changed to Mac Build Activity
A:
I do not see a link next to "Include Symbols"... I was able to download the files using instructions provided by Flutter.
https://firebase.google.com/docs/crashlytics/get-deobfuscated-reports?platform=ios&authuser=1
Run the following to display all your dSYMs' UUIDs on your machine, then search for the missing dSYM:
mdfind -name .dSYM | while read -r line; do dwarfdump -u "$line"; done
Once you find the dSYM, upload it to Crashlytics. If the mdfind command doesn’t return any results, you can look in the Products directory where your .app lives (by default, the Products directory is located in Derived Data). If your app is released to production, you can also look for its dSYM in the .xcarchive directory on disk:
In Xcode, open the Organizer window and select your app from the list. Xcode displays a list of archives for your project.
Control-click an archive to view it in Finder. Then, control-click it again and click Show Package Contents.
Within .xcarchive is a dSYMs directory that contains dSYMs generated as part of Xcode’s archiving process. Recompiled bitcode dSYMs are also downloaded to this folder when you use the Download dSYMs... tool in the Organizer window.
A:
@Nathan Dudley
These 2 methods helped me find dSYM files
First method to find dSYM-
Navigate to-
/Users/{your_name}/Library/Developer/Xcode/Archives/{last archive date}/{app name + archive date}.xcarchive / dSYMs
look for the missing dSYM
insert to a new folder
compress that folder
Second method -
Go to Xcode MenuBar -> window -> Organizer
(works with Xcode 13.2.1)
Right click on the last archive -> show in finder
Right click on fileName.xcarchive -> Show package content

Look for the missing dSYM
insert to a new folder
compress that folder
Final stage -
Upload to
Hope it helped !
| From where I need to download the dSYM from app store connect? | I regularly download dSYM from the Activity tab in app store connect.
In that tab there are list of builds from their detail we can download the dSYMs.
But from today after After App Store Connect update, I have seen that there is no activity.
Apple Thread related to this topic.
So, From where I can download the dSYM?
| [
"I found the way to download the dSYM\n\nLog in to App Store Connect, then click My Apps > TestFlight.\nSelect the build you want to download a dSYM for.\nClick Build Metadata Tab > Download dSYM.\n\n\n\nHere I found a mail sent by the apple developer program support.\nI'm with Apple Developer Program Support. I’m following up with you\nregarding your recent email.\nI understand you are unable to locate the Activity tab in App Store\nConnect. I can certainly look into this for you. After researching\nyour request, it looks like the Activity tab has merged with the\nTestFlight tab in App Store Connect. You can now see your build\nmetadata under the TestFlight tab under your app record.\n\nUpdate (17th Dec 2020):\nActivity tab name changed to Mac Build Activity\n\n",
"I do not see a link next to \"Include Symbols'...I was able to download the files using an instruction provided by flutter.\nhttps://firebase.google.com/docs/crashlytics/get-deobfuscated-reports?platform=ios&authuser=1\nRun the following to display all your dSYMs' UUIDs on your machine, then search for the missing dSYM:\nmdfind -name .dSYM | while read -r line; do dwarfdump -u \"$line\"; done\nOnce you find the dSYM, upload it to Crashlytics. If the mdfind command doesn’t return any results, you can look in the Products directory where your .app lives (by default, the Products directory is located in Derived Data). If your app is released to production, you can also look for its dSYM in the .xcarchive directory on disk:\nIn Xcode, open the Organizer window and select your app from the list. Xcode displays a list of archives for your project.\nControl-click an archive to view it in Finder. Then, control-click it again and click Show Package Contents.\nWithin .xcarchive is a dSYMs directory that contains dSYMs generated as part of Xcode’s archiving process. Recompiled bitcode dSYMs are also downloaded to this folder when you use the Download dSYMs... tool in the Organizer window.\n",
"@Nathan Dudley\nThis 2 methods helped me find dSYM files\nFirst method to find dSYM-\n\nNavigate to-\n\n/Users/{your_name}/Library/Developer/Xcode/Archives/{last archive date}/{app name + archive date}.xcarchive / dSYMs
\n\nlook for the missing dSYM
\n\ninsert to a new folder
\n\ncompress that folder\n\n\nSecond method -\n\nGo to Xcode MenuBar -> window -> Organizer\n(works with Xcode 13.2.1)\n\nRight click on the last archive -> show in finder\n\nRight click on fileName.xcarchive -> Show package content\n\n\nLook for the missing dSYM\n\n\ninsert to a new folder
\n\ncompress that folder\n\n\nFinal stage -\nUpload to\n\nHope it helped !\n"
] | [
37,
4,
0
] | [] | [] | [
"app_store_connect",
"dsym",
"ios"
] | stackoverflow_0065210768_app_store_connect_dsym_ios.txt |
Q:
Notepad++ not syntax highlighting my files
Until a week ago I was happily coding html.erb files in Notepad++ with syntax highlighting. Then my hard drive crashed.
I reinstalled Notepad++ on my new system but when I open my html.erbs, only a few of them are highlighting properly.
I think the problem might be that most of these files are being considered 'normal text files', where the few that are working are considered 'html files'. I tried 'save as' html file of the same name, but it isn't working. How can I get my syntax highlighting back? This is seriously slowing me down.
A:
You could try going to Language > H > Html and that should highlight stuff.
A:
Just in case, if anyone meets my situation: I had activated Enable global foreground color in panel Settings -> Style configurator..., deactivating it reactivated syntax highlighting.
A:
Another possible situation that could cause this is if you install new themes. If the theme doesn't have certain languages defined and you use one of those, then you will lose the syntax highlighting as well.
To Check:
Settings -> Style Configurator
Check that the language is there on the left hand side for the current theme you are running.
A:
I had the exact same problem and none of the posted solutions worked for me, so I dug deeper and found that I had mistakenly assigned my user-defined extensions twice under Settings → Style Configurator, for example SQL and XML.
Deleting my user defined extension from one language restored the syntax highlighting.
A:
I know this problem was solved 7 years ago, but it's the top result on Google, so I wanted to add the solution to my problem.
Under Settings -> Preferences... -> Languages
I had stupidly disabled python without realizing that that's what I was doing, just had to put it back into available items.
A:
Go to Settings -> Style configurator... then disable Enable global foreground color and Enable global background color. This fixed my problem.
A:
I had this problem when reinstalling notepad++ because with the new install, .r source code files were being recognized as REBOL instead of R. I had to manually remove the r file extension from the REBOL key in the langs.xml file. For instructions, see the similar question on the Notepad++ community forum at this link.
A:
My issue was very dumb, but maybe others will run into the same thing:
Some themes apply the same syntax coloring to all elements of a given language (example: Python in the Ruby Blue theme is not colored in anything other than white).
Changing the theme to one that differentiates elements of the language I was coding in solved the issue.
| Notepad++ not syntax highlighting my files | Until a week ago I was happily coding html.erb files in Notepad++ with syntax highlighting. Then my hard drive crashed.
I reinstalled Notepad++ on my new system but when I open my html.erbs, only a few of them are highlighting properly.
I think the problem might be that most of these files are being considered 'normal text files', where the few that are working are considered 'html files'. I tried 'save as' html file of the same name, but it isn't working. How can I get my syntax highlighting back? This is seriously slowing me down.
| [
"You could try going to Language > H > Html and that should highlight stuff.\n",
"Just in case, if anyone meets my situation: I had activated Enable global foreground color in panel Settings -> Style configurator..., deactivating it reactivated syntax highlighting.\n",
"Another possible situation that could cause this is if you install new themes. If the theme doesn't have certain languages defined and you use one of those, then you will lose the syntax highlighting as well. \nTo Check:\nSettings -> Style Configurator\nCheck that the language is there on the left hand side for the current theme you are running.\n",
"I had the exact same problem and none of the posted solutions worked for me so I digged deeper and found that I mistakenly assigned my user defined extensions twice under Settings → Style Configurator, for example SQL and XML.\nDeleting my user defined extension from one language restored the syntax highlighting.\n",
"So I Know this problem was solved 7 years ago but it's the top result on google so I wanted to add what the solution to my problem. \n\nUnder Settings -> Preferences... -> Languages\n\nI had stupidly disabled python without realizing that that's what I was doing, just had to put it back into available items.\n",
"Go to Settings -> Style configurator... then disable Enable global foreground color and Enable global background color. This fixed my problem.\n",
"I had this problem when reinstalling notepad++ because with the new install, .r source code files were being recognized as REBOL instead of R. I had to manually remove the r file extension from the REBOL key in the langs.xml file. For instructions, see the similar question on the Notepad++ community forum at this link.\n",
"My issue was very dumb but maybe people will have the same:\nSome themes apply the same syntax coloring to all elements of the same language (example: python in Ruby Blue theme is just not colored in anything else than white).\nChanging the theme to one that differentiates elements of the language I was coding in solved the issue.\n"
] | [
80,
78,
3,
3,
1,
1,
0,
0
] | [
"go to eclipse in web browser set internate web page..\n"
] | [
-2
] | [
"erb",
"notepad++",
"syntax_highlighting"
] | stackoverflow_0003418191_erb_notepad++_syntax_highlighting.txt |
Q:
Unable to plot 2 classes in Linear Discriminant Analysis in Python using sklearn
Thanks for reading my question - I would greatly appreciate any input!
I am currently working on a LDA problem in Python - I'm a little new to ML, so that might be one reason why I am running into this problem. Regardless, here it is:
I have a classification problem, for short we'll call it T and non-T. I have a dataframe called PODall which contains my data and their labels (0 (non-T) vs 1 (T)).
I have used the sklearn LDA module to run this analysis. I am able to get a classification accuracy, etc., just unable to actually plot my data for visualization.
I have borrowed code from https://sebastianraschka.com/Articles/2014_python_lda.html#principal-component-analysis-vs-linear-discriminant-analysis, to be able to visualise my data, namely the plotting function:
X_lda_sklearn = sklearn_lda.fit_transform(X, y)
def plot_scikit_lda(X, title):
ax = plt.subplot(111)
for label,marker,color in zip(
range(1,4),('^', 's', 'o'),('blue', 'red', 'green')):
plt.scatter(x=X[:,0][y == label],
y=X[:,1][y == label] * -1, # flip the figure
marker=marker,
color=color,
alpha=0.5,
label=label_dict[label])
plt.xlabel('LD1')
plt.ylabel('LD2')
leg = plt.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.5)
plt.title(title)
# hide axis ticks
plt.tick_params(axis="both", which="both", bottom="off", top="off",
labelbottom="on", left="off", right="off", labelleft="on")
# remove axis spines
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
plt.grid()
plt.tight_layout
plt.show()
plot_step_lda()
plot_scikit_lda(X_lda_sklearn, title='Default LDA via scikit-learn')
When I run this, I get the error that X is a one-dimensional array, and therefore X[:,1] errors.
If I add one more class, ie. "pre-T", "T", and "post-T", I am able to plot this visualisation.
If I need to clarify my problem, please let me know!!
Thanks!
~CJ
A:
This answer is probably a bit too late to help OP, but maybe it'll be useful for others: as per the Scikit-learn documentation, LDA always produces fewer dimensions than the number of classes in the data.
When the number of components is not specified, it's calculated as the highest amount possible, that is:
n_components int, default=None
Number of components (<= min(n_classes - 1, n_features)) for
dimensionality reduction. If None, will be set to min(n_classes - 1, n_features).
n_components parameter description
So if you're using LDA on a 2-class problem, you get only one dimension, which you can plot by setting the y parameter of the scatter plot to, for example, an array of zeros (or any other constant value).
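For illustration, here is a minimal sketch of that idea in Python (assuming, as in the question, that X is a NumPy feature matrix and y a NumPy array of 0/1 labels; the variable names are only for this example, not part of the original post):
import numpy as np
import matplotlib.pyplot as plt
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
# With 2 classes, LDA yields a single discriminant, so fit_transform returns shape (n_samples, 1)
lda = LinearDiscriminantAnalysis()
X_lda = lda.fit_transform(X, y)
# Plot the single discriminant on the x axis and a constant (zeros) on the y axis
for label, marker, color in zip((0, 1), ('s', 'o'), ('blue', 'red')):
    plt.scatter(X_lda[y == label, 0],
                np.zeros((y == label).sum()),
                marker=marker, color=color, alpha=0.5, label=str(label))
plt.xlabel('LD1')
plt.legend()
plt.show()
Every point then lies on a single horizontal line, and the separation (or overlap) of the two classes along LD1 is still visible.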
| Unable to plot 2 classes in Linear Discriminant Analysis in Python using sklearn | Thanks for reading my question - I would greatly appreciate any input!
I am currently working on a LDA problem in Python - I'm a little new to ML, so that might be one reason why I am running into this problem. Regardless, here it is:
I have a classification problem, for short we'll call it T and non-T. I have a dataframe called PODall which contains my data and their labels (0 (non-T) vs 1 (T)).
I have used the sklearn LDA module to run this analysis. I am able to get a classification accuracy, etc., just unable to actually plot my data for visualization.
I have borrowed code from https://sebastianraschka.com/Articles/2014_python_lda.html#principal-component-analysis-vs-linear-discriminant-analysis, to be able to visualise my data, namely the plotting function:
X_lda_sklearn = sklearn_lda.fit_transform(X, y)
def plot_scikit_lda(X, title):
ax = plt.subplot(111)
for label,marker,color in zip(
range(1,4),('^', 's', 'o'),('blue', 'red', 'green')):
plt.scatter(x=X[:,0][y == label],
y=X[:,1][y == label] * -1, # flip the figure
marker=marker,
color=color,
alpha=0.5,
label=label_dict[label])
plt.xlabel('LD1')
plt.ylabel('LD2')
leg = plt.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.5)
plt.title(title)
# hide axis ticks
plt.tick_params(axis="both", which="both", bottom="off", top="off",
labelbottom="on", left="off", right="off", labelleft="on")
# remove axis spines
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
plt.grid()
plt.tight_layout
plt.show()
plot_step_lda()
plot_scikit_lda(X_lda_sklearn, title='Default LDA via scikit-learn')
When I run this, I get the error that X is a one-dimensional array, and therefore X[:,1] errors.
If I add one more class, ie. "pre-T", "T", and "post-T", I am able to plot this visualisation.
If I need to clarify my problem, please let me know!!
Thanks!
~CJ
| [
"This is probably a bit too late answer to help OP, but maybe it'll be useful for others: as per guide from Scikit-learn documentation, LDA always produces fewer dimensions than the number of classes in data.\nWhen the number of components is not specified, it's calculated as the highest amount possible, that is:\n\nn_components int, default=None\nNumber of components (<= min(n_classes - 1, n_features)) for\ndimensionality reduction. If None, will be set to min(n_classes - 1, n_features).\n\nn_components parameter description\nSo if you're using LDA on the 2 classes problem, you get only one dimension, which you can plot by setting the y parameter in scatter plot for example as array of zeros (or any other constant value).\n"
] | [
0
] | [] | [] | [
"python",
"scikit_learn"
] | stackoverflow_0065644516_python_scikit_learn.txt |
Q:
Make a dynamic Listview inside a ListView =
As in the picture down below, I would like to make a ListView where it is possible to add more lines (red) under each ListView card.
I have implemented the overall ListView (green), with the button that should add a list inside the list. The code is at the bottom.
The picture is taken from the Strong app
My design right now is as follows:
Expanded(
// ignore: unnecessary_new
child: new ListView.builder(
itemCount: litems.length,
itemBuilder: (BuildContext ctxt, int Index) {
return Card(
child: Padding(
padding: EdgeInsets.all(10),
child: ExpansionTile(
initiallyExpanded: true,
title: Text(
litems[Index],
style: const TextStyle(
fontSize: 20,
fontWeight: FontWeight.bold,
),
),
children: <Widget>[
ElevatedButton(
onPressed: () {
litems.add('hei');
setState(() {});
},
child: const Text('Add Set')),
SizedBox(height: 5),
],
leading: IconButton(
icon: const Icon(
Icons.close,
color: Colors.red,
),
onPressed: () {
litems.removeAt(Index);
setState(() {});
},
),
)));
})),
ElevatedButton(
onPressed: () {
litems.add('hei');
setState(() {});
},
child: const Text('Add Exercises')),
A:
Try my code:
List<List<String>> parent = [];//init Parent
//Parent(Exercises) layout
Column(
children: [
ListView.builder(
itemCount: parent.length,
shrinkWrap: true,
itemBuilder: (context, index) {
return _buildList(parent[index]);
}),
TextButton(
onPressed: () {
parent.add([]);
setState(() {});
},
child: Text("Add Parent"))
],
)
//build children
_buildList(List<String> list) {
return Column(
children: [
ListView.builder(
itemCount: list.length,
shrinkWrap: true,
padding: EdgeInsets.all(0),
physics: const NeverScrollableScrollPhysics(),
itemExtent: 50,
itemBuilder: (context, index) {
return Container(
color: Colors.red.withOpacity((index * 5) / 100),
margin: EdgeInsets.symmetric(vertical: 0),
child: Text('Item'),
);
},
),
TextButton(
onPressed: () {
list.add("value");
setState(() {});
},
child: Text("Add Item"))
],
);
}
| Make a dynamic Listview inside a ListView = | as of the picture down below, I would like to make listview, where it is possible to add more lines(red) under each listview card.
I have implemented the overall listview(green), with the button that should add a list inside the list. Code is at the bottom
The picture is taken from the Strong app
My design right now is as follows:
Expanded(
// ignore: unnecessary_new
child: new ListView.builder(
itemCount: litems.length,
itemBuilder: (BuildContext ctxt, int Index) {
return Card(
child: Padding(
padding: EdgeInsets.all(10),
child: ExpansionTile(
initiallyExpanded: true,
title: Text(
litems[Index],
style: const TextStyle(
fontSize: 20,
fontWeight: FontWeight.bold,
),
),
children: <Widget>[
ElevatedButton(
onPressed: () {
litems.add('hei');
setState(() {});
},
child: const Text('Add Set')),
SizedBox(height: 5),
],
leading: IconButton(
icon: const Icon(
Icons.close,
color: Colors.red,
),
onPressed: () {
litems.removeAt(Index);
setState(() {});
},
),
)));
})),
ElevatedButton(
onPressed: () {
litems.add('hei');
setState(() {});
},
child: const Text('Add Exercises')),
| [
"Try my code:\nList<List<String>> parent = [];//init Parent\n\n//Parent(Exercises) layout\n Column(\n children: [\n ListView.builder(\n itemCount: parent.length,\n shrinkWrap: true,\n itemBuilder: (context, index) {\n return _buildList(parent[index]);\n }),\n TextButton(\n onPressed: () {\n parent.add([]);\n setState(() {});\n },\n child: Text(\"Add Parent\"))\n ],\n )\n\n//build children\n_buildList(List<String> list) {\n return Column(\n children: [\n ListView.builder(\n itemCount: list.length,\n shrinkWrap: true,\n padding: EdgeInsets.all(0),\n physics: const NeverScrollableScrollPhysics(),\n itemExtent: 50,\n itemBuilder: (context, index) {\n return Container(\n color: Colors.red.withOpacity((index * 5) / 100),\n margin: EdgeInsets.symmetric(vertical: 0),\n child: Text('Item'),\n );\n },\n ),\n TextButton(\n onPressed: () {\n list.add(\"value\");\n setState(() {});\n },\n child: Text(\"Add Item\"))\n ],\n );\n }\n\n"
] | [
1
] | [
"Use Functions to add widgets .\n"
] | [
-1
] | [
"card",
"dynamic",
"flutter",
"listview"
] | stackoverflow_0073442622_card_dynamic_flutter_listview.txt |
Q:
discord login button selenium
I'm trying to auto login to my Discord account and stay online with Python and Selenium.
The error occurs on this line:
driver.find_element(By.XPATH, '//*[@id="app-mount"]/div[2]/div/div[2]/div/div/form/div/div/div[1]/div[3]/button[2]').click()
this is my code :
import time
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
# Github credentials
username = "email"
password = "password"
# initialize the Chrome driver
driver = webdriver.Chrome("chromedriver")
# head to github login page
driver.get("https://discord.com/login")
time.sleep(3)
# find username/email field and send the username itself to the input field
driver.find_element(By.NAME, 'email').send_keys(username)
# find password input field and insert password as well
driver.find_element(By.NAME, 'password').send_keys(password)
time.sleep(10)
# click login button
driver.find_element(By.XPATH, '//*[@id="app-mount"]/div[2]/div/div[2]/div/div/form/div/div/div[1]/div[3]/button[2]').click()
# wait the ready state to be complete
WebDriverWait(driver=driver, timeout=10).until(
lambda x: x.execute_script("return document.readyState === 'complete'")
)
error_message = "Incorrect username or password."
# get the errors (if there are)
errors = driver.find_elements(By.CLASS_NAME, "flash-error")
# if we find that error message within errors, then login is failed
if any(error_message in e.text for e in errors):
print("[!] Login failed")
else:
print("[+] Login su")
I didn't find any help on the web.
A:
The problem is you are selecting the wrong XPATH. Here's how to find the correct XPATH:
Open discord login page
Using inspect find the element
Right click on the element and select Copy > Copy XPATH
Here's your correct XPATH: //*[@id="app-mount"]/div[2]/div/div[1]/div/div/div/div/form/div[2]/div/div[1]/div[2]/button[2]
Here's your final code:
import time
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
# Github credentials
username = "email"
password = "password"
# initialize the Chrome driver
driver = webdriver.Chrome("chromedriver")
# head to github login page
driver.get("https://discord.com/login")
time.sleep(3)
# find username/email field and send the username itself to the input field
driver.find_element(By.NAME, 'email').send_keys(username)
# find password input field and insert password as well
driver.find_element(By.NAME, 'password').send_keys(password)
time.sleep(10)
# click login button
driver.find_element(By.XPATH, '//*[@id="app-mount"]/div[2]/div/div[1]/div/div/div/div/form/div[2]/div/div[1]/div[2]/button[2]').click()
# wait the ready state to be complete
WebDriverWait(driver=driver, timeout=10).until(
lambda x: x.execute_script("return document.readyState === 'complete'")
)
error_message = "Incorrect username or password."
# get the errors (if there are)
errors = driver.find_elements(By.CLASS_NAME, "flash-error")
# if we find that error message within errors, then login is failed
if any(error_message in e.text for e in errors):
print("[!] Login failed")
else:
print("[+] Login su")
| discord login button selenium | im trying to auto login to my discord account and stay online with pyton and selenium
the error :
driver.find_element(By.XPATH, '//*[@id="app-mount"]/div[2]/div/div[2]/div/div/form/div/div/div[1]/div[3]/button[2]').click()
this is my code :
import time
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
# Github credentials
username = "email"
password = "password"
# initialize the Chrome driver
driver = webdriver.Chrome("chromedriver")
# head to github login page
driver.get("https://discord.com/login")
time.sleep(3)
# find username/email field and send the username itself to the input field
driver.find_element(By.NAME, 'email').send_keys(username)
# find password input field and insert password as well
driver.find_element(By.NAME, 'password').send_keys(password)
time.sleep(10)
# click login button
driver.find_element(By.XPATH, '//*[@id="app-mount"]/div[2]/div/div[2]/div/div/form/div/div/div[1]/div[3]/button[2]').click()
# wait the ready state to be complete
WebDriverWait(driver=driver, timeout=10).until(
lambda x: x.execute_script("return document.readyState === 'complete'")
)
error_message = "Incorrect username or password."
# get the errors (if there are)
errors = driver.find_elements(By.CLASS_NAME, "flash-error")
# if we find that error message within errors, then login is failed
if any(error_message in e.text for e in errors):
print("[!] Login failed")
else:
print("[+] Login su")
i didnt find any help in the web
| [
"The problem is you are selecting the wrong XPATH. Here's how to find the correct XPATH:\n\nOpen discord login page\nUsing inspect find the element\nRight click on the element and select Copy > Copy XPATH\n\n\nHere's your correct XPATH: //*[@id=\"app-mount\"]/div[2]/div/div[1]/div/div/div/div/form/div[2]/div/div[1]/div[2]/button[2]\nHere's your final code:\nimport time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\n\n# Github credentials\nusername = \"email\"\npassword = \"password\"\n\n# initialize the Chrome driver\ndriver = webdriver.Chrome(\"chromedriver\")\n\n# head to github login page\ndriver.get(\"https://discord.com/login\")\ntime.sleep(3)\n\n\n# find username/email field and send the username itself to the input field\ndriver.find_element(By.NAME, 'email').send_keys(username)\n\n# find password input field and insert password as well\ndriver.find_element(By.NAME, 'password').send_keys(password)\ntime.sleep(10)\n# click login button\ndriver.find_element(By.XPATH, '//*[@id=\"app-mount\"]/div[2]/div/div[1]/div/div/div/div/form/div[2]/div/div[1]/div[2]/button[2]').click()\n\n\n\n# wait the ready state to be complete\nWebDriverWait(driver=driver, timeout=10).until(\n lambda x: x.execute_script(\"return document.readyState === 'complete'\")\n)\nerror_message = \"Incorrect username or password.\"\n# get the errors (if there are)\nerrors = driver.find_elements(By.CLASS_NAME, \"flash-error\")\n# if we find that error message within errors, then login is failed\nif any(error_message in e.text for e in errors):\n print(\"[!] Login failed\")\nelse:\n print(\"[+] Login su\")\n\n"
] | [
0
] | [] | [] | [
"python",
"selenium"
] | stackoverflow_0074675225_python_selenium.txt |
Q:
Python - Need Help Web Scraping Dynamic Website
I'm pretty new to web scraping and would appreciate any advice for the scenarios below:
I'm trying to produce a home loans listing table using data from https://www.canstar.com.au/home-loans/
I'm mainly trying to get listings values like the ones below:
Homestar Finance | Star Essentials P&I 80% | Variable
Unloan | Home Loan LVR <80% | Variable
TicToc Home Loans | Live-in Variable P&I | Variable
ubank | Neat Home Loan Owner Occupied P&I 70-80% | Variable
and push them into a nested table
results = [[Homestar Finance, Star Essentials P&I 80%, Variable], etc, etc]
For my first attempt, I used BeautifulSoup entirely and practiced on an offline version of the site.
import pandas as pd
from bs4 import BeautifulSoup
with open('/local/path/canstar.html', 'r') as canstar_offline :
content = canstar_offline.read()
results = [['Affiliate', 'Product Name', 'Product Type']]
soup = BeautifulSoup(content, 'lxml')
for listing in soup.find_all('div', class_='table-cards-container') :
for listing1 in listing.find_all('a') :
if listing1.text.strip() != 'More details' and listing1.text.strip() != '' :
results.append(listing1.text.strip().split(' | '))
df = pd.DataFrame(results[1:], columns=results[0]).to_dict('list')
df2 = pd.DataFrame(df)
print(df2)
I pretty much got very close to what I wanted, but unfortunately it doesn't work for the actual site because it looks like I'm getting blocked for repeated requests.
So I tried this again on Selenium but now I'm stuck.
I tried to reuse as much of the transferable filtering logic from BS as I could, but with Selenium I can't get anywhere close to what I had.
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
url = 'https://www.canstar.com.au/home-loans'
results = []
driver = webdriver.Chrome()
driver.get(url)
# content = driver.page_source
# soup = BeautifulSoup(content)
time.sleep(3)
tables = driver.find_elements(By.CLASS_NAME, 'table-cards-container')
for table in tables :
listing = table.find_element(By.TAG_NAME, 'a')
print(listing.text)
This version (above) only returns one listing (I'm trying to get the entire table through iteration)
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
url = 'https://www.canstar.com.au/home-loans'
results = []
driver = webdriver.Chrome()
driver.get(url)
# content = driver.page_source
# soup = BeautifulSoup(content)
time.sleep(3)
tables = driver.find_elements(By.CLASS_NAME, 'table-cards-container')
for table in tables :
# listing = table.find_element(By.TAG_NAME, 'a')
print(table.text)
This version (above) looks like it gets all the text from the 'table-cards-container' class, but I'm unable to filter through it to just get the listings.
A:
I think you can try something like this, I hope the comments in the code explain what it is doing.
# Needed libs
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# Initiate the driver and navigate
driver = webdriver.Chrome()
url = 'https://www.canstar.com.au/home-loans'
driver.get(url)
# We save the loans list
loans = WebDriverWait(driver, 30).until(EC.presence_of_all_elements_located((By.XPATH, "//cnslib-table-card")))
# We make a loop once per loan in the loop
for i in range(1, len(loans)):
# With this Xpath I save the title of the loan
loan_title = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, f"((//cnslib-table-card)[{i}]//a)[1]"))).text
print(loan_title)
# With this Xpath I save the first percentaje we see for the loan
loan_first_percentaje = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, f"((//cnslib-table-card)[{i}]//span)[1]"))).text
print(loan_first_percentaje)
# With this Xpath I save the second percentaje we see for the loan
loan_second_percentaje = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, f"((//cnslib-table-card)[{i}]//span)[3]"))).text
print(loan_second_percentaje)
# With this Xpath I save the amount we see for the loan
loan_amount = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, f"((//cnslib-table-card)[{i}]//span)[5]"))).text
print(loan_amount)
| Python - Need Help Web Scraping Dynamic Website | I'm pretty new to web scraping and would appreciate any advice for the scenarios below:
I'm trying to produce a home loans listing table using data from https://www.canstar.com.au/home-loans/
I'm mainly trying to get listings values like the ones below:
Homestar Finance | Star Essentials P&I 80% | Variable
Unloan | Home Loan LVR <80% | Variable
TicToc Home Loans | Live-in Variable P&I | Variable
ubank | Neat Home Loan Owner Occupied P&I 70-80% | Variable
and push them into a nested table
results = [[Homestar Finance, Star Essentials P&I 80%, Variable], etc, etc]
My first attempt, I've used BeautifulSoup entirely and practice on an offline version of the site.
import pandas as pd
from bs4 import BeautifulSoup
with open('/local/path/canstar.html', 'r') as canstar_offline :
content = canstar_offline.read()
results = [['Affiliate', 'Product Name', 'Product Type']]
soup = BeautifulSoup(content, 'lxml')
for listing in soup.find_all('div', class_='table-cards-container') :
for listing1 in listing.find_all('a') :
if listing1.text.strip() != 'More details' and listing1.text.strip() != '' :
results.append(listing1.text.strip().split(' | '))
df = pd.DataFrame(results[1:], columns=results[0]).to_dict('list')
df2 = pd.DataFrame(df)
print(df2)
I pretty much got very close to what I wanted, but unfortunately it doesn't work for the actual site cause it looks like I'm getting blocked for repeated requests.
So I tried this again on Selenium but now I'm stuck.
I tried using as much of the transferrable filtering logic that I used from BS, but I can't get anywhere close to what I had using Selenium.
import time
from selenium.webdriver.common.by import By
url = 'https://www.canstar.com.au/home-loans'
results = []
driver = webdriver.Chrome()
driver.get(url)
# content = driver.page_source
# soup = BeautifulSoup(content)
time.sleep(3)
tables = driver.find_elements(By.CLASS_NAME, 'table-cards-container')
for table in tables :
listing = table.find_element(By.TAG_NAME, 'a')
print(listing.text)
This version (above) only returns one listing (I'm trying to get the entire table through iteration)
import time
from selenium.webdriver.common.by import By
url = 'https://www.canstar.com.au/home-loans'
results = []
driver = webdriver.Chrome()
driver.get(url)
# content = driver.page_source
# soup = BeautifulSoup(content)
time.sleep(3)
tables = driver.find_elements(By.CLASS_NAME, 'table-cards-container')
for table in tables :
# listing = table.find_element(By.TAG_NAME, 'a')
print(table.text)
This version (above) looks like it gets all the text from the 'table-cards-container' class, but I'm unable to filter through it to just get the listings.
| [
"I think you can try something like this, I hope the comments in the code explain what it is doing.\n# Needed libs\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n# Initiate the driver and navigate\ndriver = webdriver.Chrome()\nurl = 'https://www.canstar.com.au/home-loans'\ndriver.get(url)\n\n# We save the loans list\nloans = WebDriverWait(driver, 30).until(EC.presence_of_all_elements_located((By.XPATH, \"//cnslib-table-card\")))\n\n# We make a loop once per loan in the loop\nfor i in range(1, len(loans)):\n # With this Xpath I save the title of the loan\n loan_title = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, f\"((//cnslib-table-card)[{i}]//a)[1]\"))).text\n print(loan_title)\n # With this Xpath I save the first percentaje we see for the loan\n loan_first_percentaje = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, f\"((//cnslib-table-card)[{i}]//span)[1]\"))).text\n print(loan_first_percentaje)\n # With this Xpath I save the second percentaje we see for the loan\n loan_second_percentaje = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, f\"((//cnslib-table-card)[{i}]//span)[3]\"))).text\n print(loan_second_percentaje)\n # With this Xpath I save the amount we see for the loan\n loan_amount = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, f\"((//cnslib-table-card)[{i}]//span)[5]\"))).text\n print(loan_amount)\n\n"
] | [
0
] | [] | [] | [
"beautifulsoup",
"dynamic",
"python",
"selenium",
"web_scraping"
] | stackoverflow_0074674619_beautifulsoup_dynamic_python_selenium_web_scraping.txt |
Q:
Add to the list, a value of a column of the current row of a DataFrame only if the previous rows pass the test
A brief example of my CSV file (there is no way to publish the complete file due to the character limit):
market_name,runner_name,odds,result,back
First Half Goals 0.5,Over 0.5 Goals,1.7,WINNER,0.6545
Over/Under 6.5 Goals,Under 6.5 Goals,1.01,WINNER,0.00935
Over/Under 0.5 Goals,Over 0.5 Goals,1.71,WINNER,0.66385
Over/Under 2.5 Goals,Under 2.5 Goals,1.41,WINNER,0.3833499999999999
Over/Under 1.5 Goals,Over 1.5 Goals,1.25,WINNER,0.23375
Over/Under 4.5 Goals,Under 4.5 Goals,1.34,WINNER,0.3179
First Half Goals 0.5,Under 0.5 Goals,1.96,WINNER,0.8976000000000001
Over/Under 1.5 Goals,Over 1.5 Goals,1.6,WINNER,0.5610000000000002
Over/Under 2.5 Goals,Over 2.5 Goals,1.21,WINNER,0.1963499999999999
Over/Under 3.5 Goals,Over 3.5 Goals,1.18,WINNER,0.1682999999999999
Over/Under 3.5 Goals,Under 3.5 Goals,1.98,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,1.09,WINNER,0.08415
Over/Under 3.5 Goals,Over 3.5 Goals,2.02,LOSER,-1.0
Over/Under 1.5 Goals,Under 1.5 Goals,3.15,LOSER,-1.0
Over/Under 3.5 Goals,Over 3.5 Goals,1.44,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,1.7,WINNER,0.6545
Over/Under 1.5 Goals,Over 1.5 Goals,1.24,WINNER,0.2244
Over/Under 4.5 Goals,Over 4.5 Goals,2.06,WINNER,0.9911
Over/Under 3.5 Goals,Under 3.5 Goals,2.0,WINNER,0.935
Over/Under 1.5 Goals,Under 1.5 Goals,1.41,WINNER,0.3833499999999999
Over/Under 7.5 Goals,Under 7.5 Goals,1.27,WINNER,0.25245
Over/Under 5.5 Goals,Under 5.5 Goals,1.5,WINNER,0.4675
Over/Under 4.5 Goals,Under 4.5 Goals,1.29,WINNER,0.27115
Over/Under 1.5 Goals,Over 1.5 Goals,1.15,WINNER,0.1402499999999999
Over/Under 1.5 Goals,Over 1.5 Goals,1.53,WINNER,0.49555
Over/Under 1.5 Goals,Over 1.5 Goals,1.57,WINNER,0.53295
First Half Goals 0.5,Over 0.5 Goals,1.44,WINNER,0.4114
Over/Under 0.5 Goals,Over 0.5 Goals,2.06,WINNER,0.9911
First Half Goals 0.5,Under 0.5 Goals,2.32,WINNER,1.2342
First Half Goals 0.5,Under 0.5 Goals,1.87,WINNER,0.8134500000000001
Over/Under 2.5 Goals,Under 2.5 Goals,1.2,WINNER,0.1869999999999999
First Half Goals 0.5,Under 0.5 Goals,1.08,WINNER,0.0748
First Half Goals 0.5,Over 0.5 Goals,2.02,WINNER,0.9537
Over/Under 1.5 Goals,Under 1.5 Goals,1.69,WINNER,0.64515
Over/Under 0.5 Goals,Over 0.5 Goals,1.25,LOSER,-1.0
Over/Under 0.5 Goals,Under 0.5 Goals,4.7,WINNER,3.4595
First Half Goals 0.5,Over 0.5 Goals,1.74,WINNER,0.6919000000000001
First Half Goals 1.5,Under 1.5 Goals,1.41,LOSER,-1.0
Over/Under 0.5 Goals,Under 0.5 Goals,4.3,LOSER,-1.0
First Half Goals 0.5,Over 0.5 Goals,2.44,WINNER,1.3464
Over/Under 1.5 Goals,Over 1.5 Goals,1.6,WINNER,0.5610000000000002
First Half Goals 0.5,Under 0.5 Goals,1.77,WINNER,0.7199500000000001
First Half Goals 1.5,Under 1.5 Goals,1.88,LOSER,-1.0
Over/Under 3.5 Goals,Over 3.5 Goals,1.93,LOSER,-1.0
Over/Under 0.5 Goals,Under 0.5 Goals,1.62,WINNER,0.5797000000000001
Over/Under 3.5 Goals,Under 3.5 Goals,1.93,WINNER,0.86955
First Half Goals 0.5,Under 0.5 Goals,1.4,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.31,WINNER,0.28985
First Half Goals 0.5,Under 0.5 Goals,1.23,WINNER,0.21505
Over/Under 1.5 Goals,Under 1.5 Goals,1.75,WINNER,0.70125
Over/Under 2.5 Goals,Under 2.5 Goals,1.24,WINNER,0.2244
Over/Under 3.5 Goals,Under 3.5 Goals,1.1,WINNER,0.0935
First Half Goals 0.5,Under 0.5 Goals,1.13,WINNER,0.1215499999999999
Over/Under 5.5 Goals,Under 5.5 Goals,1.15,WINNER,0.1402499999999999
Over/Under 1.5 Goals,Over 1.5 Goals,1.4,WINNER,0.3739999999999999
Over/Under 0.5 Goals,Under 0.5 Goals,3.7,LOSER,-1.0
First Half Goals 0.5,Over 0.5 Goals,1.7,LOSER,-1.0
Over/Under 1.5 Goals,Under 1.5 Goals,4.0,WINNER,2.805
First Half Goals 0.5,Over 0.5 Goals,1.73,LOSER,-1.0
Over/Under 3.5 Goals,Over 3.5 Goals,1.17,WINNER,0.1589499999999999
First Half Goals 0.5,Over 0.5 Goals,1.97,WINNER,0.90695
Over/Under 0.5 Goals,Under 0.5 Goals,5.6,WINNER,4.301
Over/Under 1.5 Goals,Over 1.5 Goals,1.44,WINNER,0.4114
First Half Goals 0.5,Over 0.5 Goals,1.75,WINNER,0.70125
First Half Goals 0.5,Over 0.5 Goals,1.87,WINNER,0.8134500000000001
Over/Under 0.5 Goals,Over 0.5 Goals,2.02,WINNER,0.9537
First Half Goals 0.5,Under 0.5 Goals,1.75,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.61,WINNER,0.5703500000000001
Over/Under 0.5 Goals,Under 0.5 Goals,2.26,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,2.02,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.8,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.4,WINNER,0.3739999999999999
Over/Under 2.5 Goals,Under 2.5 Goals,2.32,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.27,WINNER,0.25245
First Half Goals 0.5,Under 0.5 Goals,1.5,LOSER,-1.0
Over/Under 2.5 Goals,Under 2.5 Goals,1.06,WINNER,0.0561
Over/Under 4.5 Goals,Over 4.5 Goals,3.3,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.18,WINNER,0.1682999999999999
Over/Under 1.5 Goals,Over 1.5 Goals,1.41,LOSER,-1.0
First Half Goals 0.5,Over 0.5 Goals,1.9,WINNER,0.8414999999999999
First Half Goals 0.5,Under 0.5 Goals,1.04,WINNER,0.0374
First Half Goals 1.5,Over 1.5 Goals,2.02,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.73,WINNER,0.68255
Over/Under 4.5 Goals,Over 4.5 Goals,1.47,WINNER,0.43945
Over/Under 2.5 Goals,Over 2.5 Goals,1.33,WINNER,0.3085500000000001
Over/Under 1.5 Goals,Over 1.5 Goals,2.08,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.33,WINNER,0.3085500000000001
First Half Goals 1.5,Under 1.5 Goals,1.42,WINNER,0.3926999999999999
First Half Goals 0.5,Under 0.5 Goals,1.25,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.5,WINNER,0.4675
First Half Goals 0.5,Over 0.5 Goals,2.02,WINNER,0.9537
First Half Goals 0.5,Under 0.5 Goals,1.5,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.98,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.11,WINNER,0.1028500000000001
First Half Goals 0.5,Under 0.5 Goals,1.03,WINNER,0.02805
First Half Goals 0.5,Over 0.5 Goals,2.42,WINNER,1.3277
First Half Goals 0.5,Under 0.5 Goals,1.23,LOSER,-1.0
Over/Under 4.5 Goals,Over 4.5 Goals,4.7,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.71,WINNER,0.66385
Over/Under 2.5 Goals,Over 2.5 Goals,1.67,WINNER,0.62645
Over/Under 1.5 Goals,Under 1.5 Goals,1.54,WINNER,0.5049
First Half Goals 1.5,Under 1.5 Goals,1.46,WINNER,0.4301
First Half Goals 1.5,Under 1.5 Goals,1.13,WINNER,0.1215499999999999
First Half Goals 0.5,Under 0.5 Goals,1.2,WINNER,0.1869999999999999
Over/Under 1.5 Goals,Over 1.5 Goals,1.84,WINNER,0.7854000000000001
First Half Goals 0.5,Over 0.5 Goals,1.68,WINNER,0.6358
Over/Under 1.5 Goals,Over 1.5 Goals,1.73,LOSER,-1.0
Over/Under 1.5 Goals,Under 1.5 Goals,1.27,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.73,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.24,WINNER,0.2244
First Half Goals 0.5,Under 0.5 Goals,2.0,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.9,WINNER,0.8414999999999999
First Half Goals 1.5,Under 1.5 Goals,1.33,WINNER,0.3085500000000001
First Half Goals 0.5,Over 0.5 Goals,2.02,WINNER,0.9537
First Half Goals 0.5,Under 0.5 Goals,1.51,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,1.23,WINNER,0.21505
Over/Under 2.5 Goals,Over 2.5 Goals,1.29,WINNER,0.27115
First Half Goals 1.5,Over 1.5 Goals,2.02,WINNER,0.9537
Over/Under 3.5 Goals,Over 3.5 Goals,1.68,LOSER,-1.0
Over/Under 1.5 Goals,Under 1.5 Goals,1.18,WINNER,0.1682999999999999
Over/Under 1.5 Goals,Over 1.5 Goals,1.16,WINNER,0.1495999999999999
First Half Goals 0.5,Under 0.5 Goals,2.3,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.77,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.94,WINNER,0.8789
First Half Goals 0.5,Under 0.5 Goals,1.87,LOSER,-1.0
Over/Under 3.5 Goals,Over 3.5 Goals,1.14,WINNER,0.1308999999999999
Over/Under 2.5 Goals,Over 2.5 Goals,1.24,LOSER,-1.0
Over/Under 0.5 Goals,Over 0.5 Goals,1.21,WINNER,0.1963499999999999
First Half Goals 1.5,Over 1.5 Goals,1.91,WINNER,0.85085
Over/Under 2.5 Goals,Over 2.5 Goals,1.77,LOSER,-1.0
Over/Under 0.5 Goals,Over 0.5 Goals,1.82,WINNER,0.7667
Over/Under 1.5 Goals,Under 1.5 Goals,2.14,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.16,WINNER,0.1495999999999999
First Half Goals 1.5,Under 1.5 Goals,1.11,WINNER,0.1028500000000001
Over/Under 0.5 Goals,Over 0.5 Goals,1.71,LOSER,-1.0
First Half Goals 1.5,Over 1.5 Goals,1.85,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.48,WINNER,0.4488
First Half Goals 1.5,Under 1.5 Goals,1.87,WINNER,0.8134500000000001
Over/Under 1.5 Goals,Over 1.5 Goals,1.26,WINNER,0.2431
Over/Under 1.5 Goals,Over 1.5 Goals,1.18,WINNER,0.1682999999999999
Over/Under 1.5 Goals,Over 1.5 Goals,1.47,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,1.58,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.46,WINNER,0.4301
Over/Under 1.5 Goals,Over 1.5 Goals,1.83,WINNER,0.7760500000000001
Over/Under 2.5 Goals,Over 2.5 Goals,2.58,LOSER,-1.0
Over/Under 1.5 Goals,Under 1.5 Goals,1.42,LOSER,-1.0
First Half Goals 2.5,Under 2.5 Goals,1.42,WINNER,0.3926999999999999
First Half Goals 0.5,Over 0.5 Goals,2.1,LOSER,-1.0
First Half Goals 0.5,Over 0.5 Goals,2.04,WINNER,0.9724
Over/Under 1.5 Goals,Under 1.5 Goals,1.26,WINNER,0.2431
First Half Goals 0.5,Under 0.5 Goals,1.4,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.95,WINNER,0.88825
First Half Goals 1.5,Under 1.5 Goals,1.32,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.29,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.16,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.3,WINNER,0.2805000000000001
First Half Goals 0.5,Over 0.5 Goals,1.45,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,1.58,WINNER,0.5423000000000001
Over/Under 2.5 Goals,Under 2.5 Goals,1.76,WINNER,0.7106
Over/Under 0.5 Goals,Over 0.5 Goals,1.54,WINNER,0.5049
Over/Under 4.5 Goals,Under 4.5 Goals,1.63,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.18,WINNER,0.1682999999999999
Over/Under 2.5 Goals,Over 2.5 Goals,1.71,WINNER,0.66385
Over/Under 2.5 Goals,Under 2.5 Goals,1.95,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,2.8,WINNER,1.6829999999999998
Over/Under 3.5 Goals,Under 3.5 Goals,2.2,WINNER,1.1220000000000003
Over/Under 1.5 Goals,Over 1.5 Goals,2.16,LOSER,-1.0
Over/Under 2.5 Goals,Under 2.5 Goals,1.5,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.27,WINNER,0.25245
Over/Under 6.5 Goals,Under 6.5 Goals,2.0,LOSER,-1.0
Over/Under 3.5 Goals,Over 3.5 Goals,2.06,WINNER,0.9911
Over/Under 3.5 Goals,Under 3.5 Goals,1.9,LOSER,-1.0
First Half Goals 0.5,Over 0.5 Goals,2.08,WINNER,1.0098
Over/Under 1.5 Goals,Over 1.5 Goals,1.54,LOSER,-1.0
Over/Under 3.5 Goals,Over 3.5 Goals,1.4,LOSER,-1.0
Over/Under 3.5 Goals,Under 3.5 Goals,3.35,WINNER,2.1972500000000004
Over/Under 2.5 Goals,Over 2.5 Goals,1.44,WINNER,0.4114
Over/Under 5.5 Goals,Over 5.5 Goals,1.33,WINNER,0.3085500000000001
Over/Under 1.5 Goals,Under 1.5 Goals,1.94,WINNER,0.8789
Over/Under 3.5 Goals,Under 3.5 Goals,1.01,WINNER,0.00935
First Half Goals 1.5,Under 1.5 Goals,1.78,WINNER,0.7293000000000001
First Half Goals 0.5,Over 0.5 Goals,1.63,WINNER,0.58905
First Half Goals 0.5,Over 0.5 Goals,1.75,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,2.38,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.37,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.4,WINNER,0.3739999999999999
First Half Goals 2.5,Under 2.5 Goals,1.01,WINNER,0.00935
Over/Under 1.5 Goals,Under 1.5 Goals,1.93,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,1.14,WINNER,0.1308999999999999
First Half Goals 0.5,Over 0.5 Goals,1.47,WINNER,0.43945
Over/Under 3.5 Goals,Over 3.5 Goals,1.55,WINNER,0.5142500000000001
Over/Under 1.5 Goals,Over 1.5 Goals,1.1,WINNER,0.0935
First Half Goals 0.5,Under 0.5 Goals,1.4,WINNER,0.3739999999999999
Over/Under 2.5 Goals,Over 2.5 Goals,3.0,LOSER,-1.0
Over/Under 2.5 Goals,Under 2.5 Goals,1.04,WINNER,0.0374
First Half Goals 0.5,Under 0.5 Goals,1.28,WINNER,0.2618
First Half Goals 1.5,Under 1.5 Goals,1.32,WINNER,0.2992000000000001
Over/Under 0.5 Goals,Over 0.5 Goals,1.33,WINNER,0.3085500000000001
Over/Under 0.5 Goals,Over 0.5 Goals,1.31,LOSER,-1.0
First Half Goals 0.5,Over 0.5 Goals,1.38,WINNER,0.3552999999999999
First Half Goals 2.5,Under 2.5 Goals,1.3,WINNER,0.2805000000000001
Over/Under 1.5 Goals,Over 1.5 Goals,1.3,WINNER,0.2805000000000001
First Half Goals 0.5,Under 0.5 Goals,1.65,WINNER,0.6077499999999999
First Half Goals 0.5,Under 0.5 Goals,1.39,WINNER,0.3646499999999999
First Half Goals 0.5,Over 0.5 Goals,1.71,LOSER,-1.0
First Half Goals 0.5,Over 0.5 Goals,1.85,WINNER,0.7947500000000002
First Half Goals 0.5,Under 0.5 Goals,1.72,WINNER,0.6732
Over/Under 0.5 Goals,Under 0.5 Goals,1.66,WINNER,0.6171
Over/Under 0.5 Goals,Under 0.5 Goals,2.92,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,2.5,WINNER,1.4025
Over/Under 1.5 Goals,Under 1.5 Goals,1.31,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.22,WINNER,0.2057
Over/Under 2.5 Goals,Over 2.5 Goals,1.08,WINNER,0.0748
Over/Under 1.5 Goals,Over 1.5 Goals,1.17,WINNER,0.1589499999999999
Over/Under 0.5 Goals,Over 0.5 Goals,1.24,WINNER,0.2244
Over/Under 1.5 Goals,Over 1.5 Goals,1.58,LOSER,-1.0
Over/Under 2.5 Goals,Under 2.5 Goals,1.66,LOSER,-1.0
Over/Under 3.5 Goals,Under 3.5 Goals,1.47,WINNER,0.43945
Over/Under 3.5 Goals,Over 3.5 Goals,1.12,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,2.16,LOSER,-1.0
Over/Under 1.5 Goals,Under 1.5 Goals,3.25,WINNER,2.10375
Over/Under 1.5 Goals,Under 1.5 Goals,3.1,WINNER,1.9635
Over/Under 0.5 Goals,Over 0.5 Goals,1.48,WINNER,0.4488
Over/Under 2.5 Goals,Over 2.5 Goals,1.21,WINNER,0.1963499999999999
Over/Under 3.5 Goals,Over 3.5 Goals,1.23,WINNER,0.21505
Over/Under 2.5 Goals,Under 2.5 Goals,4.7,LOSER,-1.0
Over/Under 3.5 Goals,Over 3.5 Goals,1.29,WINNER,0.27115
Over/Under 3.5 Goals,Under 3.5 Goals,1.05,WINNER,0.04675
First Half Goals 0.5,Under 0.5 Goals,2.04,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.15,WINNER,0.1402499999999999
Over/Under 3.5 Goals,Over 3.5 Goals,1.43,WINNER,0.4020499999999999
Over/Under 0.5 Goals,Over 0.5 Goals,1.49,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.28,WINNER,0.2618
Over/Under 1.5 Goals,Under 1.5 Goals,5.4,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.45,WINNER,0.4207499999999999
Over/Under 1.5 Goals,Under 1.5 Goals,1.19,WINNER,0.1776499999999999
Over/Under 2.5 Goals,Under 2.5 Goals,1.38,WINNER,0.3552999999999999
First Half Goals 0.5,Under 0.5 Goals,1.42,LOSER,-1.0
Over/Under 0.5 Goals,Over 0.5 Goals,1.33,LOSER,-1.0
Over/Under 0.5 Goals,Over 0.5 Goals,1.3,WINNER,0.2805000000000001
Over/Under 3.5 Goals,Over 3.5 Goals,1.28,WINNER,0.2618
Over/Under 1.5 Goals,Under 1.5 Goals,2.94,LOSER,-1.0
Over/Under 2.5 Goals,Under 2.5 Goals,1.95,LOSER,-1.0
Over/Under 0.5 Goals,Under 0.5 Goals,1.87,WINNER,0.8134500000000001
Over/Under 1.5 Goals,Under 1.5 Goals,1.27,WINNER,0.25245
Over/Under 4.5 Goals,Over 4.5 Goals,1.09,WINNER,0.08415
Over/Under 3.5 Goals,Under 3.5 Goals,1.01,WINNER,0.00935
Over/Under 1.5 Goals,Over 1.5 Goals,2.14,LOSER,-1.0
Over/Under 3.5 Goals,Over 3.5 Goals,1.02,WINNER,0.0187
Over/Under 4.5 Goals,Over 4.5 Goals,1.17,WINNER,0.1589499999999999
Over/Under 5.5 Goals,Over 5.5 Goals,1.24,WINNER,0.2244
First Half Goals 0.5,Under 0.5 Goals,1.4,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,1.35,LOSER,-1.0
First Half Goals 0.5,Over 0.5 Goals,1.51,LOSER,-1.0
First Half Goals 1.5,Over 1.5 Goals,2.62,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.81,WINNER,0.7573500000000001
Over/Under 0.5 Goals,Over 0.5 Goals,1.2,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.36,WINNER,0.3366000000000001
First Half Goals 0.5,Over 0.5 Goals,1.63,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.8,WINNER,0.7480000000000001
First Half Goals 0.5,Under 0.5 Goals,1.68,WINNER,0.6358
Over/Under 0.5 Goals,Under 0.5 Goals,1.27,WINNER,0.25245
Over/Under 1.5 Goals,Over 1.5 Goals,1.59,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.59,WINNER,0.5516500000000001
Over/Under 2.5 Goals,Over 2.5 Goals,1.62,WINNER,0.5797000000000001
Over/Under 0.5 Goals,Over 0.5 Goals,1.64,LOSER,-1.0
Over/Under 4.5 Goals,Over 4.5 Goals,1.34,WINNER,0.3179
Over/Under 4.5 Goals,Over 4.5 Goals,1.34,WINNER,0.3179
First Half Goals 0.5,Under 0.5 Goals,1.6,WINNER,0.5610000000000002
Over/Under 1.5 Goals,Over 1.5 Goals,2.1,LOSER,-1.0
Over/Under 5.5 Goals,Under 5.5 Goals,1.52,LOSER,-1.0
Over/Under 2.5 Goals,Under 2.5 Goals,1.19,WINNER,0.1776499999999999
Over/Under 0.5 Goals,Over 0.5 Goals,1.2,WINNER,0.1869999999999999
Over/Under 1.5 Goals,Over 1.5 Goals,1.1,WINNER,0.0935
Over/Under 3.5 Goals,Under 3.5 Goals,1.27,LOSER,-1.0
Over/Under 7.5 Goals,Under 7.5 Goals,1.38,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.87,LOSER,-1.0
Over/Under 1.5 Goals,Under 1.5 Goals,2.14,LOSER,-1.0
Over/Under 3.5 Goals,Under 3.5 Goals,1.22,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.5,WINNER,0.4675
First Half Goals 0.5,Under 0.5 Goals,1.12,WINNER,0.1122000000000001
First Half Goals 2.5,Under 2.5 Goals,1.27,WINNER,0.25245
First Half Goals 0.5,Under 0.5 Goals,1.21,WINNER,0.1963499999999999
Over/Under 2.5 Goals,Over 2.5 Goals,1.19,WINNER,0.1776499999999999
Over/Under 2.5 Goals,Over 2.5 Goals,1.25,WINNER,0.23375
Over/Under 2.5 Goals,Under 2.5 Goals,2.48,LOSER,-1.0
First Half Goals 1.5,Over 1.5 Goals,1.72,WINNER,0.6732
First Half Goals 0.5,Over 0.5 Goals,1.62,LOSER,-1.0
First Half Goals 0.5,Over 0.5 Goals,1.52,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,2.38,WINNER,1.2903
First Half Goals 0.5,Under 0.5 Goals,1.82,WINNER,0.7667
First Half Goals 0.5,Under 0.5 Goals,2.24,WINNER,1.1594000000000002
Over/Under 0.5 Goals,Over 0.5 Goals,1.28,WINNER,0.2618
Over/Under 0.5 Goals,Under 0.5 Goals,4.0,LOSER,-1.0
First Half Goals 0.5,Over 0.5 Goals,1.77,LOSER,-1.0
Over/Under 0.5 Goals,Under 0.5 Goals,3.15,WINNER,2.01025
Over/Under 0.5 Goals,Over 0.5 Goals,1.66,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.15,WINNER,0.1402499999999999
First Half Goals 0.5,Under 0.5 Goals,2.42,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.28,LOSER,-1.0
Over/Under 2.5 Goals,Under 2.5 Goals,1.28,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.55,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,1.51,WINNER,0.47685
First Half Goals 1.5,Under 1.5 Goals,1.32,LOSER,-1.0
Over/Under 3.5 Goals,Under 3.5 Goals,1.33,WINNER,0.3085500000000001
First Half Goals 0.5,Under 0.5 Goals,2.18,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,1.82,LOSER,-1.0
First Half Goals 0.5,Over 0.5 Goals,1.63,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.29,WINNER,0.27115
First Half Goals 1.5,Over 1.5 Goals,1.47,WINNER,0.43945
First Half Goals 1.5,Over 1.5 Goals,1.76,WINNER,0.7106
First Half Goals 0.5,Under 0.5 Goals,1.3,WINNER,0.2805000000000001
First Half Goals 2.5,Under 2.5 Goals,1.54,WINNER,0.5049
Over/Under 4.5 Goals,Over 4.5 Goals,1.12,WINNER,0.1122000000000001
Over/Under 0.5 Goals,Over 0.5 Goals,1.28,LOSER,-1.0
Over/Under 4.5 Goals,Over 4.5 Goals,1.16,WINNER,0.1495999999999999
Over/Under 0.5 Goals,Under 0.5 Goals,1.95,WINNER,0.88825
Over/Under 0.5 Goals,Under 0.5 Goals,2.36,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.57,WINNER,0.53295
First Half Goals 0.5,Under 0.5 Goals,1.44,LOSER,-1.0
Over/Under 3.5 Goals,Under 3.5 Goals,1.86,WINNER,0.8041000000000001
Over/Under 2.5 Goals,Under 2.5 Goals,2.18,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.22,WINNER,0.2057
Over/Under 0.5 Goals,Over 0.5 Goals,1.23,WINNER,0.21505
First Half Goals 0.5,Over 0.5 Goals,2.26,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,1.45,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.58,WINNER,0.5423000000000001
First Half Goals 1.5,Under 1.5 Goals,1.3,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.25,WINNER,0.23375
First Half Goals 0.5,Over 0.5 Goals,1.32,WINNER,0.2992000000000001
Over/Under 0.5 Goals,Under 0.5 Goals,1.22,WINNER,0.2057
First Half Goals 1.5,Over 1.5 Goals,2.44,LOSER,-1.0
Over/Under 3.5 Goals,Over 3.5 Goals,1.21,LOSER,-1.0
Over/Under 0.5 Goals,Over 0.5 Goals,1.29,WINNER,0.27115
Over/Under 0.5 Goals,Over 0.5 Goals,1.72,WINNER,0.6732
Over/Under 0.5 Goals,Under 0.5 Goals,2.5,LOSER,-1.0
Over/Under 3.5 Goals,Over 3.5 Goals,1.99,WINNER,0.92565
Over/Under 3.5 Goals,Over 3.5 Goals,1.65,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.54,WINNER,0.5049
Over/Under 1.5 Goals,Under 1.5 Goals,1.67,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.43,WINNER,0.4020499999999999
First Half Goals 0.5,Under 0.5 Goals,1.46,WINNER,0.4301
First Half Goals 1.5,Under 1.5 Goals,1.2,WINNER,0.1869999999999999
First Half Goals 2.5,Under 2.5 Goals,1.16,WINNER,0.1495999999999999
Over/Under 4.5 Goals,Over 4.5 Goals,1.16,WINNER,0.1495999999999999
Over/Under 1.5 Goals,Over 1.5 Goals,1.23,WINNER,0.21505
Over/Under 1.5 Goals,Over 1.5 Goals,1.24,WINNER,0.2244
Over/Under 4.5 Goals,Over 4.5 Goals,1.19,WINNER,0.1776499999999999
Over/Under 1.5 Goals,Over 1.5 Goals,1.61,WINNER,0.5703500000000001
Over/Under 3.5 Goals,Over 3.5 Goals,1.47,LOSER,-1.0
Over/Under 3.5 Goals,Over 3.5 Goals,1.83,WINNER,0.7760500000000001
Over/Under 0.5 Goals,Under 0.5 Goals,2.0,WINNER,0.935
First Half Goals 2.5,Under 2.5 Goals,1.46,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,2.36,WINNER,1.2716
Over/Under 3.5 Goals,Under 3.5 Goals,1.6,LOSER,-1.0
Over/Under 3.5 Goals,Under 3.5 Goals,1.68,WINNER,0.6358
Over/Under 2.5 Goals,Under 2.5 Goals,1.52,WINNER,0.4862
Over/Under 3.5 Goals,Under 3.5 Goals,1.3,WINNER,0.2805000000000001
First Half Goals 0.5,Over 0.5 Goals,1.82,WINNER,0.7667
Over/Under 1.5 Goals,Under 1.5 Goals,4.1,LOSER,-1.0
First Half Goals 0.5,Over 0.5 Goals,1.75,WINNER,0.70125
First Half Goals 0.5,Under 0.5 Goals,1.48,LOSER,-1.0
Over/Under 0.5 Goals,Under 0.5 Goals,1.53,LOSER,-1.0
Over/Under 4.5 Goals,Under 4.5 Goals,1.22,WINNER,0.2057
First Half Goals 1.5,Under 1.5 Goals,1.86,WINNER,0.8041000000000001
Over/Under 0.5 Goals,Under 0.5 Goals,1.73,WINNER,0.68255
Over/Under 0.5 Goals,Under 0.5 Goals,1.2,WINNER,0.1869999999999999
Over/Under 2.5 Goals,Under 2.5 Goals,1.58,WINNER,0.5423000000000001
First Half Goals 0.5,Under 0.5 Goals,1.28,WINNER,0.2618
Over/Under 2.5 Goals,Over 2.5 Goals,1.28,LOSER,-1.0
Over/Under 3.5 Goals,Over 3.5 Goals,1.23,WINNER,0.21505
Over/Under 2.5 Goals,Over 2.5 Goals,1.31,WINNER,0.28985
Over/Under 1.5 Goals,Under 1.5 Goals,3.05,LOSER,-1.0
Over/Under 0.5 Goals,Over 0.5 Goals,1.16,WINNER,0.1495999999999999
Over/Under 1.5 Goals,Over 1.5 Goals,1.24,WINNER,0.2244
Over/Under 0.5 Goals,Over 0.5 Goals,1.21,WINNER,0.1963499999999999
Over/Under 0.5 Goals,Over 0.5 Goals,1.45,WINNER,0.4207499999999999
Over/Under 0.5 Goals,Over 0.5 Goals,1.4,LOSER,-1.0
Over/Under 0.5 Goals,Over 0.5 Goals,2.2,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,1.56,WINNER,0.5236000000000001
Over/Under 0.5 Goals,Over 0.5 Goals,2.74,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.18,WINNER,0.1682999999999999
Over/Under 1.5 Goals,Over 1.5 Goals,1.21,WINNER,0.1963499999999999
First Half Goals 1.5,Under 1.5 Goals,2.18,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.05,WINNER,0.04675
Over/Under 1.5 Goals,Over 1.5 Goals,1.27,WINNER,0.25245
First Half Goals 0.5,Over 0.5 Goals,1.89,WINNER,0.83215
First Half Goals 0.5,Over 0.5 Goals,1.8,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.9,LOSER,-1.0
Over/Under 0.5 Goals,Under 0.5 Goals,1.75,WINNER,0.70125
Over/Under 1.5 Goals,Over 1.5 Goals,1.3,WINNER,0.2805000000000001
First Half Goals 1.5,Over 1.5 Goals,1.7,WINNER,0.6545
First Half Goals 0.5,Over 0.5 Goals,1.78,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.84,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.97,WINNER,0.90695
Over/Under 1.5 Goals,Over 1.5 Goals,1.23,WINNER,0.21505
Over/Under 2.5 Goals,Over 2.5 Goals,1.3,LOSER,-1.0
First Half Goals 2.5,Under 2.5 Goals,1.3,WINNER,0.2805000000000001
Over/Under 1.5 Goals,Over 1.5 Goals,1.38,LOSER,-1.0
Over/Under 1.5 Goals,Under 1.5 Goals,2.2,LOSER,-1.0
This CSV file contains the results of my investments.
The column containing the profit/loss is the column named back
And I want to test it like this:
Let's assume I want to see the total profit only for the investments I would make when, according to some filters, the same investment pattern was profitable in the previous records.
Example:
The 51st investment is 'market_name' → Over/Under 2.5 Goals, 'runner_name' → Under 2.5 Goals and 'odds' → 1.24
So I want to sum the profit/loss of the 50 previous investments that match these same filters; if that sum is greater than zero, then I make investment 51.
And so on for each of the lines: for the 100th investment, I check whether the previous 99 investments, filtered by the cited options, were profitable; if so, I add the back value of the 100th row to the list for the final sum.
So I created this code:
import pandas as pd
df = pd.read_csv('test.csv')
df = df[df['result'].notnull()]
matches = []
for number in range(len(df)):
    try:
        dfilter = df[:number]
        filter = dfilter[(dfilter['market_name'] == df['market_name'][number+1]) & (dfilter['runner_name'] == df['runner_name'][number+1]) & (dfilter['odds'] == df['odds'][number+1])]
        back_sum = filter['back'].sum()
        if back_sum > 0:
            matches.append(df['back'][number+1])
    except:
        pass
print(sum(matches))
But the final sum it delivers does not match the real results of my investments.
I can't find where the flaw is in the code because it looks correct to me visually.
A:
The slice df[:number] takes the elements up to, but not including, number. And when referring to the current line, you must use number, not number+1. You can check this yourself: print df[:3] and you get the first three rows (positions 0, 1 and 2); the row at position 3 is not included.
But if you use loc, slicing is inclusive rather than exclusive (you should not forget about this). That is, df.loc[:3, :] selects rows up to and including the label 3.
That is, you need this:
for number in range(len(df)):
    try:
        dfilter = df[:number]
        filter = dfilter[(dfilter['market_name'] == df['market_name'][number]) &
                         (dfilter['runner_name'] == df['runner_name'][number]) & (dfilter['odds'] == df['odds'][number])]
        back_sum = filter['back'].sum()
        if back_sum > 0:
            matches.append(df['back'][number])
    except:
        pass
If the dataframe is large, the loop will be slow. I can recommend a list comprehension, which is usually faster than an explicit loop. Below I build a column 'invest' whose values are True or False depending on back_sum.
df = pd.read_csv('test.csv')

# df = df[df['result'].notnull()].reset_index(drop=True)
"""
Decide whether you need all rows (and indexes) of the original dataframe.
If you only want the rows where 'result' is not null, uncomment the line above
and work with the filtered dataframe: df = df[df['result'].notnull()].reset_index(drop=True)
"""

def my_func(i):
    dfilter = df[:i]
    filter = dfilter[(dfilter['market_name'] == df['market_name'][i]) &
                     (dfilter['runner_name'] == df['runner_name'][i]) & (dfilter['odds'] == df['odds'][i])]
    back_sum = filter['back'].sum()
    aaa = True
    if back_sum <= 0:
        aaa = False
    return aaa

df['invest'] = [my_func(i) for i in range(len(df))]
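If you then need the overall total the question asks for, a minimal sketch (assuming the df['invest'] column built above) is to sum 'back' only where 'invest' is True - this is equivalent to sum(matches) in the loop version:

# assumes the boolean 'invest' column from the snippet above
total = df.loc[df['invest'], 'back'].sum()
print(total)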
| Add to the list, a value of a column of the current row of a DataFrame only if the previous rows pass the test | A brief example of my CSV file (there is no way to publish the complete file because of the character limit):
market_name,runner_name,odds,result,back
First Half Goals 0.5,Over 0.5 Goals,1.7,WINNER,0.6545
Over/Under 6.5 Goals,Under 6.5 Goals,1.01,WINNER,0.00935
Over/Under 0.5 Goals,Over 0.5 Goals,1.71,WINNER,0.66385
Over/Under 2.5 Goals,Under 2.5 Goals,1.41,WINNER,0.3833499999999999
Over/Under 1.5 Goals,Over 1.5 Goals,1.25,WINNER,0.23375
Over/Under 4.5 Goals,Under 4.5 Goals,1.34,WINNER,0.3179
First Half Goals 0.5,Under 0.5 Goals,1.96,WINNER,0.8976000000000001
Over/Under 1.5 Goals,Over 1.5 Goals,1.6,WINNER,0.5610000000000002
Over/Under 2.5 Goals,Over 2.5 Goals,1.21,WINNER,0.1963499999999999
Over/Under 3.5 Goals,Over 3.5 Goals,1.18,WINNER,0.1682999999999999
Over/Under 3.5 Goals,Under 3.5 Goals,1.98,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,1.09,WINNER,0.08415
Over/Under 3.5 Goals,Over 3.5 Goals,2.02,LOSER,-1.0
Over/Under 1.5 Goals,Under 1.5 Goals,3.15,LOSER,-1.0
Over/Under 3.5 Goals,Over 3.5 Goals,1.44,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,1.7,WINNER,0.6545
Over/Under 1.5 Goals,Over 1.5 Goals,1.24,WINNER,0.2244
Over/Under 4.5 Goals,Over 4.5 Goals,2.06,WINNER,0.9911
Over/Under 3.5 Goals,Under 3.5 Goals,2.0,WINNER,0.935
Over/Under 1.5 Goals,Under 1.5 Goals,1.41,WINNER,0.3833499999999999
Over/Under 7.5 Goals,Under 7.5 Goals,1.27,WINNER,0.25245
Over/Under 5.5 Goals,Under 5.5 Goals,1.5,WINNER,0.4675
Over/Under 4.5 Goals,Under 4.5 Goals,1.29,WINNER,0.27115
Over/Under 1.5 Goals,Over 1.5 Goals,1.15,WINNER,0.1402499999999999
Over/Under 1.5 Goals,Over 1.5 Goals,1.53,WINNER,0.49555
Over/Under 1.5 Goals,Over 1.5 Goals,1.57,WINNER,0.53295
First Half Goals 0.5,Over 0.5 Goals,1.44,WINNER,0.4114
Over/Under 0.5 Goals,Over 0.5 Goals,2.06,WINNER,0.9911
First Half Goals 0.5,Under 0.5 Goals,2.32,WINNER,1.2342
First Half Goals 0.5,Under 0.5 Goals,1.87,WINNER,0.8134500000000001
Over/Under 2.5 Goals,Under 2.5 Goals,1.2,WINNER,0.1869999999999999
First Half Goals 0.5,Under 0.5 Goals,1.08,WINNER,0.0748
First Half Goals 0.5,Over 0.5 Goals,2.02,WINNER,0.9537
Over/Under 1.5 Goals,Under 1.5 Goals,1.69,WINNER,0.64515
Over/Under 0.5 Goals,Over 0.5 Goals,1.25,LOSER,-1.0
Over/Under 0.5 Goals,Under 0.5 Goals,4.7,WINNER,3.4595
First Half Goals 0.5,Over 0.5 Goals,1.74,WINNER,0.6919000000000001
First Half Goals 1.5,Under 1.5 Goals,1.41,LOSER,-1.0
Over/Under 0.5 Goals,Under 0.5 Goals,4.3,LOSER,-1.0
First Half Goals 0.5,Over 0.5 Goals,2.44,WINNER,1.3464
Over/Under 1.5 Goals,Over 1.5 Goals,1.6,WINNER,0.5610000000000002
First Half Goals 0.5,Under 0.5 Goals,1.77,WINNER,0.7199500000000001
First Half Goals 1.5,Under 1.5 Goals,1.88,LOSER,-1.0
Over/Under 3.5 Goals,Over 3.5 Goals,1.93,LOSER,-1.0
Over/Under 0.5 Goals,Under 0.5 Goals,1.62,WINNER,0.5797000000000001
Over/Under 3.5 Goals,Under 3.5 Goals,1.93,WINNER,0.86955
First Half Goals 0.5,Under 0.5 Goals,1.4,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.31,WINNER,0.28985
First Half Goals 0.5,Under 0.5 Goals,1.23,WINNER,0.21505
Over/Under 1.5 Goals,Under 1.5 Goals,1.75,WINNER,0.70125
Over/Under 2.5 Goals,Under 2.5 Goals,1.24,WINNER,0.2244
Over/Under 3.5 Goals,Under 3.5 Goals,1.1,WINNER,0.0935
First Half Goals 0.5,Under 0.5 Goals,1.13,WINNER,0.1215499999999999
Over/Under 5.5 Goals,Under 5.5 Goals,1.15,WINNER,0.1402499999999999
Over/Under 1.5 Goals,Over 1.5 Goals,1.4,WINNER,0.3739999999999999
Over/Under 0.5 Goals,Under 0.5 Goals,3.7,LOSER,-1.0
First Half Goals 0.5,Over 0.5 Goals,1.7,LOSER,-1.0
Over/Under 1.5 Goals,Under 1.5 Goals,4.0,WINNER,2.805
First Half Goals 0.5,Over 0.5 Goals,1.73,LOSER,-1.0
Over/Under 3.5 Goals,Over 3.5 Goals,1.17,WINNER,0.1589499999999999
First Half Goals 0.5,Over 0.5 Goals,1.97,WINNER,0.90695
Over/Under 0.5 Goals,Under 0.5 Goals,5.6,WINNER,4.301
Over/Under 1.5 Goals,Over 1.5 Goals,1.44,WINNER,0.4114
First Half Goals 0.5,Over 0.5 Goals,1.75,WINNER,0.70125
First Half Goals 0.5,Over 0.5 Goals,1.87,WINNER,0.8134500000000001
Over/Under 0.5 Goals,Over 0.5 Goals,2.02,WINNER,0.9537
First Half Goals 0.5,Under 0.5 Goals,1.75,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.61,WINNER,0.5703500000000001
Over/Under 0.5 Goals,Under 0.5 Goals,2.26,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,2.02,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.8,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.4,WINNER,0.3739999999999999
Over/Under 2.5 Goals,Under 2.5 Goals,2.32,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.27,WINNER,0.25245
First Half Goals 0.5,Under 0.5 Goals,1.5,LOSER,-1.0
Over/Under 2.5 Goals,Under 2.5 Goals,1.06,WINNER,0.0561
Over/Under 4.5 Goals,Over 4.5 Goals,3.3,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.18,WINNER,0.1682999999999999
Over/Under 1.5 Goals,Over 1.5 Goals,1.41,LOSER,-1.0
First Half Goals 0.5,Over 0.5 Goals,1.9,WINNER,0.8414999999999999
First Half Goals 0.5,Under 0.5 Goals,1.04,WINNER,0.0374
First Half Goals 1.5,Over 1.5 Goals,2.02,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.73,WINNER,0.68255
Over/Under 4.5 Goals,Over 4.5 Goals,1.47,WINNER,0.43945
Over/Under 2.5 Goals,Over 2.5 Goals,1.33,WINNER,0.3085500000000001
Over/Under 1.5 Goals,Over 1.5 Goals,2.08,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.33,WINNER,0.3085500000000001
First Half Goals 1.5,Under 1.5 Goals,1.42,WINNER,0.3926999999999999
First Half Goals 0.5,Under 0.5 Goals,1.25,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.5,WINNER,0.4675
First Half Goals 0.5,Over 0.5 Goals,2.02,WINNER,0.9537
First Half Goals 0.5,Under 0.5 Goals,1.5,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.98,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.11,WINNER,0.1028500000000001
First Half Goals 0.5,Under 0.5 Goals,1.03,WINNER,0.02805
First Half Goals 0.5,Over 0.5 Goals,2.42,WINNER,1.3277
First Half Goals 0.5,Under 0.5 Goals,1.23,LOSER,-1.0
Over/Under 4.5 Goals,Over 4.5 Goals,4.7,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.71,WINNER,0.66385
Over/Under 2.5 Goals,Over 2.5 Goals,1.67,WINNER,0.62645
Over/Under 1.5 Goals,Under 1.5 Goals,1.54,WINNER,0.5049
First Half Goals 1.5,Under 1.5 Goals,1.46,WINNER,0.4301
First Half Goals 1.5,Under 1.5 Goals,1.13,WINNER,0.1215499999999999
First Half Goals 0.5,Under 0.5 Goals,1.2,WINNER,0.1869999999999999
Over/Under 1.5 Goals,Over 1.5 Goals,1.84,WINNER,0.7854000000000001
First Half Goals 0.5,Over 0.5 Goals,1.68,WINNER,0.6358
Over/Under 1.5 Goals,Over 1.5 Goals,1.73,LOSER,-1.0
Over/Under 1.5 Goals,Under 1.5 Goals,1.27,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.73,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.24,WINNER,0.2244
First Half Goals 0.5,Under 0.5 Goals,2.0,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.9,WINNER,0.8414999999999999
First Half Goals 1.5,Under 1.5 Goals,1.33,WINNER,0.3085500000000001
First Half Goals 0.5,Over 0.5 Goals,2.02,WINNER,0.9537
First Half Goals 0.5,Under 0.5 Goals,1.51,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,1.23,WINNER,0.21505
Over/Under 2.5 Goals,Over 2.5 Goals,1.29,WINNER,0.27115
First Half Goals 1.5,Over 1.5 Goals,2.02,WINNER,0.9537
Over/Under 3.5 Goals,Over 3.5 Goals,1.68,LOSER,-1.0
Over/Under 1.5 Goals,Under 1.5 Goals,1.18,WINNER,0.1682999999999999
Over/Under 1.5 Goals,Over 1.5 Goals,1.16,WINNER,0.1495999999999999
First Half Goals 0.5,Under 0.5 Goals,2.3,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.77,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.94,WINNER,0.8789
First Half Goals 0.5,Under 0.5 Goals,1.87,LOSER,-1.0
Over/Under 3.5 Goals,Over 3.5 Goals,1.14,WINNER,0.1308999999999999
Over/Under 2.5 Goals,Over 2.5 Goals,1.24,LOSER,-1.0
Over/Under 0.5 Goals,Over 0.5 Goals,1.21,WINNER,0.1963499999999999
First Half Goals 1.5,Over 1.5 Goals,1.91,WINNER,0.85085
Over/Under 2.5 Goals,Over 2.5 Goals,1.77,LOSER,-1.0
Over/Under 0.5 Goals,Over 0.5 Goals,1.82,WINNER,0.7667
Over/Under 1.5 Goals,Under 1.5 Goals,2.14,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.16,WINNER,0.1495999999999999
First Half Goals 1.5,Under 1.5 Goals,1.11,WINNER,0.1028500000000001
Over/Under 0.5 Goals,Over 0.5 Goals,1.71,LOSER,-1.0
First Half Goals 1.5,Over 1.5 Goals,1.85,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.48,WINNER,0.4488
First Half Goals 1.5,Under 1.5 Goals,1.87,WINNER,0.8134500000000001
Over/Under 1.5 Goals,Over 1.5 Goals,1.26,WINNER,0.2431
Over/Under 1.5 Goals,Over 1.5 Goals,1.18,WINNER,0.1682999999999999
Over/Under 1.5 Goals,Over 1.5 Goals,1.47,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,1.58,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.46,WINNER,0.4301
Over/Under 1.5 Goals,Over 1.5 Goals,1.83,WINNER,0.7760500000000001
Over/Under 2.5 Goals,Over 2.5 Goals,2.58,LOSER,-1.0
Over/Under 1.5 Goals,Under 1.5 Goals,1.42,LOSER,-1.0
First Half Goals 2.5,Under 2.5 Goals,1.42,WINNER,0.3926999999999999
First Half Goals 0.5,Over 0.5 Goals,2.1,LOSER,-1.0
First Half Goals 0.5,Over 0.5 Goals,2.04,WINNER,0.9724
Over/Under 1.5 Goals,Under 1.5 Goals,1.26,WINNER,0.2431
First Half Goals 0.5,Under 0.5 Goals,1.4,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.95,WINNER,0.88825
First Half Goals 1.5,Under 1.5 Goals,1.32,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.29,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.16,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.3,WINNER,0.2805000000000001
First Half Goals 0.5,Over 0.5 Goals,1.45,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,1.58,WINNER,0.5423000000000001
Over/Under 2.5 Goals,Under 2.5 Goals,1.76,WINNER,0.7106
Over/Under 0.5 Goals,Over 0.5 Goals,1.54,WINNER,0.5049
Over/Under 4.5 Goals,Under 4.5 Goals,1.63,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.18,WINNER,0.1682999999999999
Over/Under 2.5 Goals,Over 2.5 Goals,1.71,WINNER,0.66385
Over/Under 2.5 Goals,Under 2.5 Goals,1.95,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,2.8,WINNER,1.6829999999999998
Over/Under 3.5 Goals,Under 3.5 Goals,2.2,WINNER,1.1220000000000003
Over/Under 1.5 Goals,Over 1.5 Goals,2.16,LOSER,-1.0
Over/Under 2.5 Goals,Under 2.5 Goals,1.5,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.27,WINNER,0.25245
Over/Under 6.5 Goals,Under 6.5 Goals,2.0,LOSER,-1.0
Over/Under 3.5 Goals,Over 3.5 Goals,2.06,WINNER,0.9911
Over/Under 3.5 Goals,Under 3.5 Goals,1.9,LOSER,-1.0
First Half Goals 0.5,Over 0.5 Goals,2.08,WINNER,1.0098
Over/Under 1.5 Goals,Over 1.5 Goals,1.54,LOSER,-1.0
Over/Under 3.5 Goals,Over 3.5 Goals,1.4,LOSER,-1.0
Over/Under 3.5 Goals,Under 3.5 Goals,3.35,WINNER,2.1972500000000004
Over/Under 2.5 Goals,Over 2.5 Goals,1.44,WINNER,0.4114
Over/Under 5.5 Goals,Over 5.5 Goals,1.33,WINNER,0.3085500000000001
Over/Under 1.5 Goals,Under 1.5 Goals,1.94,WINNER,0.8789
Over/Under 3.5 Goals,Under 3.5 Goals,1.01,WINNER,0.00935
First Half Goals 1.5,Under 1.5 Goals,1.78,WINNER,0.7293000000000001
First Half Goals 0.5,Over 0.5 Goals,1.63,WINNER,0.58905
First Half Goals 0.5,Over 0.5 Goals,1.75,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,2.38,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.37,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.4,WINNER,0.3739999999999999
First Half Goals 2.5,Under 2.5 Goals,1.01,WINNER,0.00935
Over/Under 1.5 Goals,Under 1.5 Goals,1.93,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,1.14,WINNER,0.1308999999999999
First Half Goals 0.5,Over 0.5 Goals,1.47,WINNER,0.43945
Over/Under 3.5 Goals,Over 3.5 Goals,1.55,WINNER,0.5142500000000001
Over/Under 1.5 Goals,Over 1.5 Goals,1.1,WINNER,0.0935
First Half Goals 0.5,Under 0.5 Goals,1.4,WINNER,0.3739999999999999
Over/Under 2.5 Goals,Over 2.5 Goals,3.0,LOSER,-1.0
Over/Under 2.5 Goals,Under 2.5 Goals,1.04,WINNER,0.0374
First Half Goals 0.5,Under 0.5 Goals,1.28,WINNER,0.2618
First Half Goals 1.5,Under 1.5 Goals,1.32,WINNER,0.2992000000000001
Over/Under 0.5 Goals,Over 0.5 Goals,1.33,WINNER,0.3085500000000001
Over/Under 0.5 Goals,Over 0.5 Goals,1.31,LOSER,-1.0
First Half Goals 0.5,Over 0.5 Goals,1.38,WINNER,0.3552999999999999
First Half Goals 2.5,Under 2.5 Goals,1.3,WINNER,0.2805000000000001
Over/Under 1.5 Goals,Over 1.5 Goals,1.3,WINNER,0.2805000000000001
First Half Goals 0.5,Under 0.5 Goals,1.65,WINNER,0.6077499999999999
First Half Goals 0.5,Under 0.5 Goals,1.39,WINNER,0.3646499999999999
First Half Goals 0.5,Over 0.5 Goals,1.71,LOSER,-1.0
First Half Goals 0.5,Over 0.5 Goals,1.85,WINNER,0.7947500000000002
First Half Goals 0.5,Under 0.5 Goals,1.72,WINNER,0.6732
Over/Under 0.5 Goals,Under 0.5 Goals,1.66,WINNER,0.6171
Over/Under 0.5 Goals,Under 0.5 Goals,2.92,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,2.5,WINNER,1.4025
Over/Under 1.5 Goals,Under 1.5 Goals,1.31,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.22,WINNER,0.2057
Over/Under 2.5 Goals,Over 2.5 Goals,1.08,WINNER,0.0748
Over/Under 1.5 Goals,Over 1.5 Goals,1.17,WINNER,0.1589499999999999
Over/Under 0.5 Goals,Over 0.5 Goals,1.24,WINNER,0.2244
Over/Under 1.5 Goals,Over 1.5 Goals,1.58,LOSER,-1.0
Over/Under 2.5 Goals,Under 2.5 Goals,1.66,LOSER,-1.0
Over/Under 3.5 Goals,Under 3.5 Goals,1.47,WINNER,0.43945
Over/Under 3.5 Goals,Over 3.5 Goals,1.12,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,2.16,LOSER,-1.0
Over/Under 1.5 Goals,Under 1.5 Goals,3.25,WINNER,2.10375
Over/Under 1.5 Goals,Under 1.5 Goals,3.1,WINNER,1.9635
Over/Under 0.5 Goals,Over 0.5 Goals,1.48,WINNER,0.4488
Over/Under 2.5 Goals,Over 2.5 Goals,1.21,WINNER,0.1963499999999999
Over/Under 3.5 Goals,Over 3.5 Goals,1.23,WINNER,0.21505
Over/Under 2.5 Goals,Under 2.5 Goals,4.7,LOSER,-1.0
Over/Under 3.5 Goals,Over 3.5 Goals,1.29,WINNER,0.27115
Over/Under 3.5 Goals,Under 3.5 Goals,1.05,WINNER,0.04675
First Half Goals 0.5,Under 0.5 Goals,2.04,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.15,WINNER,0.1402499999999999
Over/Under 3.5 Goals,Over 3.5 Goals,1.43,WINNER,0.4020499999999999
Over/Under 0.5 Goals,Over 0.5 Goals,1.49,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.28,WINNER,0.2618
Over/Under 1.5 Goals,Under 1.5 Goals,5.4,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.45,WINNER,0.4207499999999999
Over/Under 1.5 Goals,Under 1.5 Goals,1.19,WINNER,0.1776499999999999
Over/Under 2.5 Goals,Under 2.5 Goals,1.38,WINNER,0.3552999999999999
First Half Goals 0.5,Under 0.5 Goals,1.42,LOSER,-1.0
Over/Under 0.5 Goals,Over 0.5 Goals,1.33,LOSER,-1.0
Over/Under 0.5 Goals,Over 0.5 Goals,1.3,WINNER,0.2805000000000001
Over/Under 3.5 Goals,Over 3.5 Goals,1.28,WINNER,0.2618
Over/Under 1.5 Goals,Under 1.5 Goals,2.94,LOSER,-1.0
Over/Under 2.5 Goals,Under 2.5 Goals,1.95,LOSER,-1.0
Over/Under 0.5 Goals,Under 0.5 Goals,1.87,WINNER,0.8134500000000001
Over/Under 1.5 Goals,Under 1.5 Goals,1.27,WINNER,0.25245
Over/Under 4.5 Goals,Over 4.5 Goals,1.09,WINNER,0.08415
Over/Under 3.5 Goals,Under 3.5 Goals,1.01,WINNER,0.00935
Over/Under 1.5 Goals,Over 1.5 Goals,2.14,LOSER,-1.0
Over/Under 3.5 Goals,Over 3.5 Goals,1.02,WINNER,0.0187
Over/Under 4.5 Goals,Over 4.5 Goals,1.17,WINNER,0.1589499999999999
Over/Under 5.5 Goals,Over 5.5 Goals,1.24,WINNER,0.2244
First Half Goals 0.5,Under 0.5 Goals,1.4,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,1.35,LOSER,-1.0
First Half Goals 0.5,Over 0.5 Goals,1.51,LOSER,-1.0
First Half Goals 1.5,Over 1.5 Goals,2.62,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.81,WINNER,0.7573500000000001
Over/Under 0.5 Goals,Over 0.5 Goals,1.2,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.36,WINNER,0.3366000000000001
First Half Goals 0.5,Over 0.5 Goals,1.63,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.8,WINNER,0.7480000000000001
First Half Goals 0.5,Under 0.5 Goals,1.68,WINNER,0.6358
Over/Under 0.5 Goals,Under 0.5 Goals,1.27,WINNER,0.25245
Over/Under 1.5 Goals,Over 1.5 Goals,1.59,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.59,WINNER,0.5516500000000001
Over/Under 2.5 Goals,Over 2.5 Goals,1.62,WINNER,0.5797000000000001
Over/Under 0.5 Goals,Over 0.5 Goals,1.64,LOSER,-1.0
Over/Under 4.5 Goals,Over 4.5 Goals,1.34,WINNER,0.3179
Over/Under 4.5 Goals,Over 4.5 Goals,1.34,WINNER,0.3179
First Half Goals 0.5,Under 0.5 Goals,1.6,WINNER,0.5610000000000002
Over/Under 1.5 Goals,Over 1.5 Goals,2.1,LOSER,-1.0
Over/Under 5.5 Goals,Under 5.5 Goals,1.52,LOSER,-1.0
Over/Under 2.5 Goals,Under 2.5 Goals,1.19,WINNER,0.1776499999999999
Over/Under 0.5 Goals,Over 0.5 Goals,1.2,WINNER,0.1869999999999999
Over/Under 1.5 Goals,Over 1.5 Goals,1.1,WINNER,0.0935
Over/Under 3.5 Goals,Under 3.5 Goals,1.27,LOSER,-1.0
Over/Under 7.5 Goals,Under 7.5 Goals,1.38,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.87,LOSER,-1.0
Over/Under 1.5 Goals,Under 1.5 Goals,2.14,LOSER,-1.0
Over/Under 3.5 Goals,Under 3.5 Goals,1.22,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.5,WINNER,0.4675
First Half Goals 0.5,Under 0.5 Goals,1.12,WINNER,0.1122000000000001
First Half Goals 2.5,Under 2.5 Goals,1.27,WINNER,0.25245
First Half Goals 0.5,Under 0.5 Goals,1.21,WINNER,0.1963499999999999
Over/Under 2.5 Goals,Over 2.5 Goals,1.19,WINNER,0.1776499999999999
Over/Under 2.5 Goals,Over 2.5 Goals,1.25,WINNER,0.23375
Over/Under 2.5 Goals,Under 2.5 Goals,2.48,LOSER,-1.0
First Half Goals 1.5,Over 1.5 Goals,1.72,WINNER,0.6732
First Half Goals 0.5,Over 0.5 Goals,1.62,LOSER,-1.0
First Half Goals 0.5,Over 0.5 Goals,1.52,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,2.38,WINNER,1.2903
First Half Goals 0.5,Under 0.5 Goals,1.82,WINNER,0.7667
First Half Goals 0.5,Under 0.5 Goals,2.24,WINNER,1.1594000000000002
Over/Under 0.5 Goals,Over 0.5 Goals,1.28,WINNER,0.2618
Over/Under 0.5 Goals,Under 0.5 Goals,4.0,LOSER,-1.0
First Half Goals 0.5,Over 0.5 Goals,1.77,LOSER,-1.0
Over/Under 0.5 Goals,Under 0.5 Goals,3.15,WINNER,2.01025
Over/Under 0.5 Goals,Over 0.5 Goals,1.66,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.15,WINNER,0.1402499999999999
First Half Goals 0.5,Under 0.5 Goals,2.42,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.28,LOSER,-1.0
Over/Under 2.5 Goals,Under 2.5 Goals,1.28,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.55,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,1.51,WINNER,0.47685
First Half Goals 1.5,Under 1.5 Goals,1.32,LOSER,-1.0
Over/Under 3.5 Goals,Under 3.5 Goals,1.33,WINNER,0.3085500000000001
First Half Goals 0.5,Under 0.5 Goals,2.18,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,1.82,LOSER,-1.0
First Half Goals 0.5,Over 0.5 Goals,1.63,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.29,WINNER,0.27115
First Half Goals 1.5,Over 1.5 Goals,1.47,WINNER,0.43945
First Half Goals 1.5,Over 1.5 Goals,1.76,WINNER,0.7106
First Half Goals 0.5,Under 0.5 Goals,1.3,WINNER,0.2805000000000001
First Half Goals 2.5,Under 2.5 Goals,1.54,WINNER,0.5049
Over/Under 4.5 Goals,Over 4.5 Goals,1.12,WINNER,0.1122000000000001
Over/Under 0.5 Goals,Over 0.5 Goals,1.28,LOSER,-1.0
Over/Under 4.5 Goals,Over 4.5 Goals,1.16,WINNER,0.1495999999999999
Over/Under 0.5 Goals,Under 0.5 Goals,1.95,WINNER,0.88825
Over/Under 0.5 Goals,Under 0.5 Goals,2.36,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.57,WINNER,0.53295
First Half Goals 0.5,Under 0.5 Goals,1.44,LOSER,-1.0
Over/Under 3.5 Goals,Under 3.5 Goals,1.86,WINNER,0.8041000000000001
Over/Under 2.5 Goals,Under 2.5 Goals,2.18,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.22,WINNER,0.2057
Over/Under 0.5 Goals,Over 0.5 Goals,1.23,WINNER,0.21505
First Half Goals 0.5,Over 0.5 Goals,2.26,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,1.45,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.58,WINNER,0.5423000000000001
First Half Goals 1.5,Under 1.5 Goals,1.3,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.25,WINNER,0.23375
First Half Goals 0.5,Over 0.5 Goals,1.32,WINNER,0.2992000000000001
Over/Under 0.5 Goals,Under 0.5 Goals,1.22,WINNER,0.2057
First Half Goals 1.5,Over 1.5 Goals,2.44,LOSER,-1.0
Over/Under 3.5 Goals,Over 3.5 Goals,1.21,LOSER,-1.0
Over/Under 0.5 Goals,Over 0.5 Goals,1.29,WINNER,0.27115
Over/Under 0.5 Goals,Over 0.5 Goals,1.72,WINNER,0.6732
Over/Under 0.5 Goals,Under 0.5 Goals,2.5,LOSER,-1.0
Over/Under 3.5 Goals,Over 3.5 Goals,1.99,WINNER,0.92565
Over/Under 3.5 Goals,Over 3.5 Goals,1.65,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.54,WINNER,0.5049
Over/Under 1.5 Goals,Under 1.5 Goals,1.67,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.43,WINNER,0.4020499999999999
First Half Goals 0.5,Under 0.5 Goals,1.46,WINNER,0.4301
First Half Goals 1.5,Under 1.5 Goals,1.2,WINNER,0.1869999999999999
First Half Goals 2.5,Under 2.5 Goals,1.16,WINNER,0.1495999999999999
Over/Under 4.5 Goals,Over 4.5 Goals,1.16,WINNER,0.1495999999999999
Over/Under 1.5 Goals,Over 1.5 Goals,1.23,WINNER,0.21505
Over/Under 1.5 Goals,Over 1.5 Goals,1.24,WINNER,0.2244
Over/Under 4.5 Goals,Over 4.5 Goals,1.19,WINNER,0.1776499999999999
Over/Under 1.5 Goals,Over 1.5 Goals,1.61,WINNER,0.5703500000000001
Over/Under 3.5 Goals,Over 3.5 Goals,1.47,LOSER,-1.0
Over/Under 3.5 Goals,Over 3.5 Goals,1.83,WINNER,0.7760500000000001
Over/Under 0.5 Goals,Under 0.5 Goals,2.0,WINNER,0.935
First Half Goals 2.5,Under 2.5 Goals,1.46,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,2.36,WINNER,1.2716
Over/Under 3.5 Goals,Under 3.5 Goals,1.6,LOSER,-1.0
Over/Under 3.5 Goals,Under 3.5 Goals,1.68,WINNER,0.6358
Over/Under 2.5 Goals,Under 2.5 Goals,1.52,WINNER,0.4862
Over/Under 3.5 Goals,Under 3.5 Goals,1.3,WINNER,0.2805000000000001
First Half Goals 0.5,Over 0.5 Goals,1.82,WINNER,0.7667
Over/Under 1.5 Goals,Under 1.5 Goals,4.1,LOSER,-1.0
First Half Goals 0.5,Over 0.5 Goals,1.75,WINNER,0.70125
First Half Goals 0.5,Under 0.5 Goals,1.48,LOSER,-1.0
Over/Under 0.5 Goals,Under 0.5 Goals,1.53,LOSER,-1.0
Over/Under 4.5 Goals,Under 4.5 Goals,1.22,WINNER,0.2057
First Half Goals 1.5,Under 1.5 Goals,1.86,WINNER,0.8041000000000001
Over/Under 0.5 Goals,Under 0.5 Goals,1.73,WINNER,0.68255
Over/Under 0.5 Goals,Under 0.5 Goals,1.2,WINNER,0.1869999999999999
Over/Under 2.5 Goals,Under 2.5 Goals,1.58,WINNER,0.5423000000000001
First Half Goals 0.5,Under 0.5 Goals,1.28,WINNER,0.2618
Over/Under 2.5 Goals,Over 2.5 Goals,1.28,LOSER,-1.0
Over/Under 3.5 Goals,Over 3.5 Goals,1.23,WINNER,0.21505
Over/Under 2.5 Goals,Over 2.5 Goals,1.31,WINNER,0.28985
Over/Under 1.5 Goals,Under 1.5 Goals,3.05,LOSER,-1.0
Over/Under 0.5 Goals,Over 0.5 Goals,1.16,WINNER,0.1495999999999999
Over/Under 1.5 Goals,Over 1.5 Goals,1.24,WINNER,0.2244
Over/Under 0.5 Goals,Over 0.5 Goals,1.21,WINNER,0.1963499999999999
Over/Under 0.5 Goals,Over 0.5 Goals,1.45,WINNER,0.4207499999999999
Over/Under 0.5 Goals,Over 0.5 Goals,1.4,LOSER,-1.0
Over/Under 0.5 Goals,Over 0.5 Goals,2.2,LOSER,-1.0
Over/Under 2.5 Goals,Over 2.5 Goals,1.56,WINNER,0.5236000000000001
Over/Under 0.5 Goals,Over 0.5 Goals,2.74,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.18,WINNER,0.1682999999999999
Over/Under 1.5 Goals,Over 1.5 Goals,1.21,WINNER,0.1963499999999999
First Half Goals 1.5,Under 1.5 Goals,2.18,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.05,WINNER,0.04675
Over/Under 1.5 Goals,Over 1.5 Goals,1.27,WINNER,0.25245
First Half Goals 0.5,Over 0.5 Goals,1.89,WINNER,0.83215
First Half Goals 0.5,Over 0.5 Goals,1.8,LOSER,-1.0
Over/Under 1.5 Goals,Over 1.5 Goals,1.9,LOSER,-1.0
Over/Under 0.5 Goals,Under 0.5 Goals,1.75,WINNER,0.70125
Over/Under 1.5 Goals,Over 1.5 Goals,1.3,WINNER,0.2805000000000001
First Half Goals 1.5,Over 1.5 Goals,1.7,WINNER,0.6545
First Half Goals 0.5,Over 0.5 Goals,1.78,LOSER,-1.0
First Half Goals 1.5,Under 1.5 Goals,1.84,LOSER,-1.0
First Half Goals 0.5,Under 0.5 Goals,1.97,WINNER,0.90695
Over/Under 1.5 Goals,Over 1.5 Goals,1.23,WINNER,0.21505
Over/Under 2.5 Goals,Over 2.5 Goals,1.3,LOSER,-1.0
First Half Goals 2.5,Under 2.5 Goals,1.3,WINNER,0.2805000000000001
Over/Under 1.5 Goals,Over 1.5 Goals,1.38,LOSER,-1.0
Over/Under 1.5 Goals,Under 1.5 Goals,2.2,LOSER,-1.0
This CSV file contains the results of my investments.
The column containing the profit/loss is the column named back
And I want to test it like this:
Let's assume I want to see the total profit only for the investments I would make when, according to some filters, the same investment pattern was profitable in the previous records.
Example:
The 51st investment is 'market_name' → Over/Under 2.5 Goals, 'runner_name' → Under 2.5 Goals and 'odds' → 1.24
So I want to sum the profit/loss of the 50 previous investments that match these same filters; if that sum is greater than zero, then I make investment 51.
And so on for each of the lines: for the 100th investment, I check whether the previous 99 investments, filtered by the cited options, were profitable; if so, I add the back value of the 100th row to the list for the final sum.
So I created this code:
import pandas as pd
df = pd.read_csv('test.csv')
df = df[df['result'].notnull()]
matches = []
for number in range(len(df)):
    try:
        dfilter = df[:number]
        filter = dfilter[(dfilter['market_name'] == df['market_name'][number+1]) & (dfilter['runner_name'] == df['runner_name'][number+1]) & (dfilter['odds'] == df['odds'][number+1])]
        back_sum = filter['back'].sum()
        if back_sum > 0:
            matches.append(df['back'][number+1])
    except:
        pass
print(sum(matches))
But the final sum it delivers does not match the real results of my investments.
I can't find where the flaw is in the code because it looks correct to me visually.
| [
"Slice df[:number] means to take elements up to number. And when referring to the current line, you must use number, not number+1. This can be checked, for example, print df[:3] and get all the lines up to the third one.\nBut if you use loc, then operations through the slice will not be up to, but inclusive (you should not forget about this). That is, with df.loc[:3, :] rows will be selected, including the third one.\nThat is, you need this:\nfor number in range(len(df)):\n try:\n dfilter = df[:number]\n filter = dfilter[(dfilter['market_name'] == df['market_name'][number]) &\n (dfilter['runner_name'] == df['runner_name'][number]) & (dfilter['odds'] == df['odds'][number])]\n\n back_sum = filter['back'].sum()\n if back_sum > 0:\n matches.append(df['back'][number])\n except:\n pass\n\nIf the dataframe is large, the loop will be slow. I can recommend List comprehension, which is many times faster than a loop. Below I made a column 'invest', where, depending on back_sum, the values will be True or False.\ndf = pd.read_csv('test.csv')\n\n#df = df[df['result'].notnull()].reset_index(drop=True)\n\"\"\"\nis whether all indexes of the original dataframe are needed,\nif not, then you can add this line and use the filtered dataframe(if you need a filtered dataframe,\nuncomment this line df = df[df['result'].notnull()].reset_index(drop=True))\n\"\"\"\n\ndef my_func(i):\n dfilter = df[:i]\n filter = dfilter[(dfilter['market_name'] == df['market_name'][i]) &\n (dfilter['runner_name'] == df['runner_name'][i]) & (dfilter['odds'] == df['odds'][i])]\n back_sum = filter['back'].sum()\n aaa = True\n if back_sum <= 0:\n aaa = False\n\n return aaa\n\ndf['invest'] = [my_func(i) for i in range(len(df))]\n\n"
] | [
1
] | [] | [] | [
"pandas",
"python"
] | stackoverflow_0074671716_pandas_python.txt |
Q:
What is a Future and how do I use it?
I get the following error:
A value of type 'Future<int>' can't be assigned to a variable of type 'int'
It might be another type instead of int, but basically the pattern is:
A value of type 'Future<T>' can't be assigned to a variable of type 'T'
So:
What exactly is a Future?
How do I get the actual value I want to get?
What widget do I use to display my value when all I have is a Future<T>?
A:
In case you are familiar with Task<T> or Promise<T> and the async/await pattern, then you can skip right to the "How to use a Future with the widgets in Flutter" section.
What is a Future and how do I use it?
Well, the documentation says:
An object representing a delayed computation.
That is correct. It's also a little abstract and dry. Normally, a function returns a result. Sequentially. The function is called, runs and returns its result. Until then, the caller waits. Some functions, especially when they access resources like hardware or network, take a little time to do so. Imagine an avatar picture being loaded from a web server, a user's data being loaded from a database or just the texts of the app in multiple languages being loaded from device memory. That might be slow.
Most applications by default have a single flow of control. When this flow is blocked, for example by waiting for a computation or resource access that takes time, the application just freezes. You may remember this as standard if you are old enough, but in today's world that would be seen as a bug. Even if something takes time, we get a little animation. A spinner, an hourglass, maybe a progress bar. But how can an application run and show an animation and yet still wait for the result? The answer is: asynchronous operations. Operations that still run while your code waits for something. Now how does the compiler know whether it should actually stop everything and wait for a result, or continue with all the background work and wait only in this instance? Well, it cannot figure that out on its own. We have to tell it.
This is achieved through a pattern known as async and await. It's not specific to Flutter or Dart; it exists under the same name in many other languages. You can find the documentation for Dart here.
Since a method that takes some time cannot return immediately, it will return the promise of delivering a value when it's done.
That is called a Future. So the promise to load a number from the database would return a Future<int> while the promise to return a list of movies from an internet search might return a Future<List<Movie>>. A Future<T> is something that in the future will give you a T.
Let's try a different explanation:
A future represents the result of an asynchronous operation, and can have two states: uncompleted or completed.
Most likely, as you aren't doing this just for fun, you actually need the results of that Future<T> to progress in your application. You need to display the number from the database or the list of movies found. So you want to wait, until the result is there. This is where await comes in:
Future<List<Movie>> result = loadMoviesFromSearch(input);
// right here, you need the result. So you wait for it:
List<Movie> movies = await result;
But wait, haven't we come full circle? Aren't we waiting on the result again? Yes, indeed we are. Programs would be utterly chaotic if they did not have some resemblance of sequential flow. But the point is that by using the keyword await we have told the compiler that, at this point, while we want to wait for the result, we do not want our application to just freeze. We want all the other running operations, like for example animations, to continue.
However, you can only use the await keyword in functions that themselves are marked as async and return a Future<T>. Because when you await something, the function that is awaiting can no longer return its result immediately. You can only return what you have; if you have to wait for it, you have to return a promise to deliver it later.
Future<Pizza> getPizza() async {
Future<PizzaBox> delivery = orderPizza();
var pizzaBox = await delivery;
var pizza = pizzaBox.unwrap();
return pizza;
}
Our getPizza function has to wait for the pizza, so instead of returning Pizza immediately, it has to return the promise that a pizza will be there in the future. Now you can, in turn, await the getPizza function somewhere.
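For example (a minimal sketch, assuming the getPizza function above), another async function can await it like this:

Future<void> haveDinner() async {
  // await pauses only this function, not the whole app, until the Future completes
  var pizza = await getPizza();
  print('Time to eat: $pizza');
}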
How to use a Future with the widgets in Flutter?
All the widgets in Flutter expect real values. Not some promise of a value to come at a later time. When a button needs a text, it cannot use a promise that text will come later. It needs to display the button now, so it needs the text now.
But sometimes, all you have is a Future<T>. That is where FutureBuilder comes in. You can use it when you have a future, to display one thing while you are waiting for it (for example a progress indicator) and another thing when it's done (for example the result).
Let's take a look at our pizza example. You want to order pizza, you want a progress indicator while you wait for it, you want to see the result once it's delivered, and maybe show an error message when there is an error:
import 'package:flutter/material.dart';
void main() {
runApp(MyApp());
}
/// ordering a pizza takes 5 seconds and then gives you a pizza salami with extra cheese
Future<String> orderPizza() {
return Future<String>.delayed(const Duration(seconds: 5), () async => 'Pizza Salami, Extra Cheese');
}
class MyApp extends StatelessWidget {
@override
Widget build(BuildContext context) {
return MaterialApp(
theme: ThemeData.dark(),
home: Scaffold(
body: Center(
child: PizzaOrder(),
),
),
);
}
}
class PizzaOrder extends StatefulWidget {
@override
_PizzaOrderState createState() => _PizzaOrderState();
}
class _PizzaOrderState extends State<PizzaOrder> {
Future<String>? delivery;
@override
Widget build(BuildContext context) {
return Column(
crossAxisAlignment: CrossAxisAlignment.center,
mainAxisAlignment: MainAxisAlignment.spaceEvenly,
children: [
ElevatedButton(
onPressed: delivery != null ? null : () => setState(() { delivery = orderPizza(); }),
child: const Text('Order Pizza Now')
),
delivery == null
? const Text('No delivery scheduled')
: FutureBuilder(
future: delivery,
builder: (context, snapshot) {
if(snapshot.hasData) {
return Text('Delivery done: ${snapshot.data}');
} else if(snapshot.hasError) {
return Text('Delivery error: ${snapshot.error.toString()}');
} else {
return const CircularProgressIndicator();
}
})
]);
}
}
This is how you use a FutureBuilder to display the result of your future once you have it.
A:
Here's a list of analogies to Dart's Future from other languages:
JS: Promise
Java: Future
Python: Future
C#: Task
Just like in other languages, Future is a special type of object that lets you use the async/await syntax sugar and write asynchronous code in a synchronous/linear way. You return a Future from an async method rather than accepting a callback as a parameter, and so avoid callback hell - both Futures and callbacks solve the same problem (running some code at a later time), but in different ways.
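As a rough illustration (a minimal sketch with a made-up loadNumber function), here is the same delayed value consumed once with a then() callback and once with async/await:

Future<int> loadNumber() =>
    Future.delayed(const Duration(seconds: 1), () => 42);

// callback style
void callbackStyle() {
  loadNumber().then((value) => print('got $value'));
}

// async/await style - reads like synchronous code
Future<void> awaitStyle() async {
  final value = await loadNumber();
  print('got $value');
}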
A:
Future<T> returns a potential value that will be produced by asynchronous work.
Eg:
Future<int> getValue() async {
return Future.value(5);
}
The above code returns Future.value(5), which is of int type, but when receiving the value from the method we can't use the type Future<int>, i.e.
Future<int> value = await getValue(); // Not Allowed
// Error
A value of type 'Future<int>' can't be assigned to a variable of type 'int'
To solve the above, the result of getValue() should be received as an int:
 int value = await getValue(); // right way, as it returns the potential value.
A:
I hope this key point is informative; I show it with two different async methods:
Note the following method, where showLoading(), getAllCarsFromApi() and hideLoading() are inner async methods.
If I put the await keyword before showLoading(), the operation waits until it's done and then goes to the next line. But I intentionally removed the await because I need my loading dialog to be displayed while getAllCarsFromApi() is being processed, which means showLoading() and getAllCarsFromApi() run concurrently. Finally, hideLoading() hides the loading dialog.
Future<List<Car>> getData() async{
showLoading();
final List<Car> cars = await getAllCarsFromApi();
hideLoading();
return cars;
}
Now look at another async method. Here the getCarByIdFromApi() method needs an id which is calculated by getCarIdFromDatabase(), so there must be an await keyword before the first call to make the operation wait until the id is calculated and passed to the second method. So here the two methods are processed one after another, sequentially.
Future<Car> getCar() async{
int id = await getCarIdFromDatabase();
final Car car = await getCarByIdFromApi(id);
return car;
}
A:
A simple answer: if a function returns its value only after some delay, a Future is used to obtain that value.
Future<int> calculate({required int val1, required int val2}) async {
await Future.delayed(const Duration(seconds: 2));
return val1 + val2;
}
if we call the above function as
getTotal() async {
int result = calculate(val1: 5, val2: 5);
print(result);
}
we will get the following error:
A value of type 'Future<int>' can't be assigned to a variable of type 'int'
but if we use await before the function call, it will give the actual value returned from the function after the delay:
getTotal() async {
int result = await calculate(val1: 5, val2: 5);
print(result);
}
the async keyword is required on the enclosing function in order to use await and get the returned value from the Future
A:
In Flutter, a Future represents a potential value or error that will be available at some time in the future. It is a way of managing the results of asynchronous operations, such as network requests or long-running operations.
A Future can be in one of three states:
Uncompleted: the Future has not yet been completed, and its value or error is not yet available.
Completed: the Future has been completed, and its value or error is available.
Cancelled: the Future has been canceled, and its value or error will never be available.
To use a Future, you need to do the following:
Create a Future by calling the Future() constructor and providing a function that returns the value or error that the Future will represent.
Use the await keyword to wait for the Future to complete and provide a result.
Use the then() method to specify what should happen when the Future completes successfully.
To use a Future in your Flutter app, you need to first create a Future object that represents the asynchronous operation you want to perform. This can be done using the Future constructor or one of the methods provided by the Future class.
Here is an example of how you can create a Future object:
Future<int> future = Future(() => someOperations());
Once you have created a Future object, you can use the then() method to specify what should happen when the Future completes. This method takes a function as an argument, and this function will be called with the value or error from the Future when it completes.
Here is an example of how you can use the then() method:
future.then((value) {
print('The result of the operation is: $value');
});
The then() method returns another Future object, which you can use to chain multiple operations together. This allows you to perform multiple asynchronous operations in sequence and handle their results in a sequential manner.
Here is an example of how you can chain multiple operations together using the then() method:
future.then((value) {
print('The result of the first operation is: $value');
return someOtherOperations();
}).then((value) {
print('The result of the second operation is: $value');
});
Here is another example of how you can use a Future in your Flutter app:
Future<String> getData() async {
final result = await Future.delayed(Duration(seconds: 1), () => true);
if (result) {
return 'This is the result';
} else {
throw 'This is the error';
}
}
getData().then((result) {
print(result);
}).catchError((error) {
print(error);
});
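For comparison (a minimal sketch reusing the getData() function above), the same call can also be written with async/await and try/catch instead of then()/catchError():

Future<void> run() async {
  try {
    final result = await getData();
    print(result);
  } catch (error) {
    print(error);
  }
}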
| What is a Future and how do I use it? | I get the following error:
A value of type 'Future<int>' can't be assigned to a variable of type 'int'
It might be another type instead of int, but basically the pattern is:
A value of type 'Future<T>' can't be assigned to a variable of type 'T'
So:
What exactly is a Future?
How do I get the actual value I want to get?
What widget do I use to display my value when all I have is a Future<T>?
| [
"In case you are familiar with Task<T> or Promise<T> and the async/ await pattern, then you can skip right to the \"How to use a Future with the widgets in Flutter\" section.\nWhat is a Future and how do I use it?\nWell, the documentation says:\n\nAn object representing a delayed computation.\n\nThat is correct. It's also a little abstract and dry. Normally, a function returns a result. Sequentially. The function is called, runs and returns it's result. Until then, the caller waits. Some functions, especially when they access resources like hardware or network, take a little time to do so. Imagine an avatar picture being loaded from a web server, a user's data being loaded from a database or just the texts of the app in multiple languages being loaded from device memory. That might be slow.\nMost applications by default have a single flow of control. When this flow is blocked, for example by waiting for a computation or resource access that takes time, the application just freezes. You may remember this as standard if you are old enough, but in today's world that would be seen as a bug. Even if something takes time, we get a little animation. A spinner, an hourglass, maybe a progress bar. But how can an application run and show an animation and yet still wait for the result? The answer is: asynchronous operations. Operations that still run while your code waits for something. Now how does the compiler know, whether it should actually stop everything and wait for a result or continue with all the background work and wait only in this instance? Well, it cannot figure that out on it's own. We have to tell it.\nThis is achieved through a pattern known as async and await. It's not specific to flutter or dart, it exists under the same name in many other languages. You can find the documentation for Dart here.\nSince a method that takes some time cannot return immediately, it will return the promise of delivering a value when it's done.\nThat is called a Future. So the promise to load a number from the database would return a Future<int> while the promise to return a list of movies from an internet search might return a Future<List<Movie>>. A Future<T> is something that in the future will give you a T.\nLets try a different explanation:\n\nA future represents the result of an asynchronous operation, and can have two states: uncompleted or completed.\n\nMost likely, as you aren't doing this just for fun, you actually need the results of that Future<T> to progress in your application. You need to display the number from the database or the list of movies found. So you want to wait, until the result is there. This is where await comes in:\nFuture<List<Movie>> result = loadMoviesFromSearch(input);\n\n// right here, you need the result. So you wait for it:\nList<Movie> movies = await result;\n\nBut wait, haven't we come full circle? Aren't we waiting on the result again? Yes, indeed we are. Programs would be utterly chaotic if they did not have some resemblence of sequential flow. But the point is that using the keyword await we have told the compiler, that at this point, while we want to wait for the result, we do not want our application to just freeze. We want all the other running operations like for example animations to continue.\nHowever, you can only use the awaitkeyword in functions that themselves are marked as async and return a Future<T>. Because when you await something, then the function that is awaiting can no longer return their result immediately. 
You can only return what you have, if you have to wait for it, you have to return a promise to deliver it later.\nFuture<Pizza> getPizza() async {\n Future<PizzaBox> delivery = orderPizza(); \n\n var pizzaBox = await delivery;\n\n var pizza = pizzaBox.unwrap();\n \n return pizza; \n}\n\nOur getPizza function has to wait for the pizza, so instead of returning Pizza immediately, it has to return the promise that a pizza will be there in the future. Now you can, in turn, await the getPizza function somewhere.\nHow to use a Future with the widgets in Flutter?\nAll the widgets in flutter expect real values. Not some promise of a value to come at a later time. When a button needs a text, it cannot use a promise that text will come later. It needs to display the button now, so it needs the text now.\nBut sometimes, all you have is a Future<T>. That is where FutureBuilder comes in. You can use it when you have a future, to display one thing while you are waiting for it (for example a progress indicator) and another thing when it's done (for example the result).\nLet's take a look at our pizza example. You want to order pizza, you want a progress indicator while you wait for it, you want to see the result once it's delivered, and maybe show an error message when there is an error:\nimport 'package:flutter/material.dart';\n\nvoid main() {\n runApp(MyApp());\n}\n\n/// ordering a pizza takes 5 seconds and then gives you a pizza salami with extra cheese\nFuture<String> orderPizza() {\n return Future<String>.delayed(const Duration(seconds: 5), () async => 'Pizza Salami, Extra Cheese');\n}\n\nclass MyApp extends StatelessWidget {\n @override\n Widget build(BuildContext context) {\n return MaterialApp(\n theme: ThemeData.dark(),\n home: Scaffold(\n body: Center(\n child: PizzaOrder(),\n ),\n ),\n );\n }\n}\n\nclass PizzaOrder extends StatefulWidget {\n @override\n _PizzaOrderState createState() => _PizzaOrderState();\n}\n\nclass _PizzaOrderState extends State<PizzaOrder> {\n Future<String>? delivery;\n\n @override\n Widget build(BuildContext context) {\n return Column(\n crossAxisAlignment: CrossAxisAlignment.center,\n mainAxisAlignment: MainAxisAlignment.spaceEvenly,\n children: [\n ElevatedButton(\n onPressed: delivery != null ? null : () => setState(() { delivery = orderPizza(); }),\n child: const Text('Order Pizza Now')\n ),\n delivery == null\n ? const Text('No delivery scheduled')\n : FutureBuilder(\n future: delivery,\n builder: (context, snapshot) {\n if(snapshot.hasData) {\n return Text('Delivery done: ${snapshot.data}');\n } else if(snapshot.hasError) {\n return Text('Delivery error: ${snapshot.error.toString()}');\n } else {\n return const CircularProgressIndicator();\n }\n })\n ]);\n }\n}\n\nThis is how you use a FutureBuilder to display the result of your future once you have it.\n",
"Here's a list of analogies to Dart's Future from other languages:\n\nJS: Promise\nJava: Future\nPython: Future\nC#: Task\n\nJust like in other languages Future is a special type of object which allows to use async/await syntax sugar, write asynchronous code in synchronous/linear way. You return Future from an async method rather than accept a callback as a parameter and avoid the callback hell - both Futures and callbacks solve same problems (firing some code at a latter time) but in a different way.\n",
"Future<T> returning the potential value which will be done by async work\nEg:\nFuture<int> getValue() async {\n return Future.value(5);\n }\n\nAbove code is returning Future.value(5) which is of int type, but while receiving the value from method we can't use type Future<int> i.e\nFuture<int> value = await getValue(); // Not Allowed\n// Error\nA value of type 'Future<int>' can't be assigned to a variable of type 'int'\n\nTo solve above getValue() should be received under int type\n int value = await getValue(); // right way as it returning the potential value. \n\n",
"I hope this key point will be informative, I show it in two different Async methods:\nNote the following method where showLoading(), getAllCarsFromApi() and hideLoading() are inner Async methods.\nIf I put the await keyword before showLoading(), the Operation waits until it's done then goes to the next line but I intentionally removed the await because I need my Loading dialog be displayed simultaneously with getAllCarsFromApi() is being processed, so it means showLoading() and getAllCarsFromApi() methods are processed on different Threads. Finally hideLoading() hides the loading dialog.\nFuture<List<Car>> getData() async{\n showLoading();\n final List<Car> cars = await getAllCarsFromApi();\n hideLoading();\n return cars;\n}\n\nNow look at this another Async method, here the getCarByIdFromApi() method needs an id which is calculated from the getCarIdFromDatabase(), so there must be an await keyword before the first method to make the Operation wait until id is calculated and passed to the second method. So here two methods are processed one after another and in a single Thread.\nFuture<Car> getCar() async{\n int id = await getCarIdFromDatabase();\n final Car car = await getCarByIdFromApi(id);\n return car;\n}\n\n",
"A simple answer is that if a function returns its value with a delay of some time, Future is used to get its value.\nFuture<int> calculate({required int val1, required int val2}) async {\n await Future.delayed(const Duration(seconds: 2));\n return val1 + val2;\n }\n\nif we call the above function as\ngetTotal() async { \n\n int result = calculate(val1: 5, val2: 5);\n\n print(result);\n }\n\nwe will get the following error:\nA value of type 'Future<int>' can't be assigned to a variable of type 'int'\n\n\nbut if we use await before function call it will give the actual returned value from the function after a delay\ngetTotal() async { \n\n int result = await calculate(val1: 5, val2: 5);\n\n print(result);\n }\n\n\nthe keyword async is required to use await for the Future to get returned value\n",
"In Flutter, a Future represents a potential value or error that will be available at some time in the future. It is a way of managing the results of asynchronous operations, such as network requests or long-running operations.\nA Future can be in one of three states:\n\nUncompleted: the Future has not yet been completed, and its value or error is not yet available.\nCompleted: the Future has been completed, and its value or error is available.\nCancelled: the Future has been canceled, and its value or error will never be available.\n\nTo use a Future, you need to do the following:\n\nCreate a Future by calling the Future() constructor and providing a function that returns the value or error that the Future will represent.\nUse the await keyword to wait for the Future to complete and provide a result.\nUse the then() method to specify what should happen when the Future completes successfully.\n\nTo use a Future in your Flutter app, you need to first create a Future object that represents the asynchronous operation you want to perform. This can be done using the Future constructor or one of the methods provided by the Future class.\nHere is an example of how you can create a Future object:\nFuture<int> future = Future(() => someOperations());\n\nOnce you have created a Future object, you can use the then() method to specify what should happen when the Future completes. This method takes a function as an argument, and this function will be called with the value or error from the Future when it completes.\nHere is an example of how you can use the then() method:\nfuture.then((value) {\n print('The result of the operation is: $value');\n});\n\nThe then() method returns another Future object, which you can use to chain multiple operations together. This allows you to perform multiple asynchronous operations in sequence and handle their results in a sequential manner.\nHere is an example of how you can chain multiple operations together using the then() method:\nfuture.then((value) {\n print('The result of the first operation is: $value');\n return someOtherOperations();\n}).then((value) {\n print('The result of the second operation is: $value');\n});\n\nHere is another example of how you can use a Future in your Flutter app:\nFuture<String> getData() async {\n final result = await Future.delayed(Duration(seconds: 1), () => true);\n if (result) {\n return 'This is the result';\n } else {\n throw 'This is the error';\n }\n}\n\ngetData().then((result) {\n print(result);\n}).catchError((error) {\n print(error);\n});\n\n"
] | [
48,
10,
9,
7,
3,
0
] | [] | [] | [
"dart",
"flutter",
"future"
] | stackoverflow_0063017280_dart_flutter_future.txt |
Q:
How to display the latest line based on the file's name or the line's position in bash
I have a tricky question about how to keep only the latest log data, as my server reposted it twice.
This is the result after I grep from my folder (I have tons of data; this is just to keep the example simple):
...
20150630-201427.csv:20150630,CFIIASU,233,96.21786,0.44644,
20150630-201427.csv:20150630,CFIIASU_AU,65,90.71109,0.28569
20150630-201427.csv:20150630,CFIIASU_CN,68,102.19569,0.10692
20150630-201427.csv:20150630,CFIIASU_ID,37,98.02484,0.27775
20150630-201427.csv:20150630,CFIIASU_KR,39,98.42257,0.83055
20150630-201427.csv:20150630,CFIIASU_TH,24,99.94482,0.20743
20150701-151654.csv:20150630,CFIIASU,233,96.21450,0.44294
20150701-151654.csv:20150630,CFIIASU_AU,65,90.71109,0.28569
20150701-151654.csv:20150630,CFIIASU_CN,68,102.16538,0.07723
20150701-151654.csv:20150630,CFIIASU_ID,37,98.02484,0.27775
20150701-151654.csv:20150630,CFIIASU_KR,39,98.42257,0.83055
20150701-151654.csv:20150630,CFIIASU_TH,24,99.94482,0.20743
...
The data actually comes from many csv files; I only picked two csv files to make the example. Here are some explanations:
the example comes from two files, 20150630-201427.csv and 20150701-151654.csv, and each line has five columns which correspond to date, dataname, data_column1, data_column2, data_column3.
these lines have the same data date 20150630 and the same dataname (CFIIASU, CFIIASU_AU, etc.), but the numbers in the fourth and fifth columns (data_column2 and data_column3) are different.
How could I keep the data of 20150701-151654.csv based on the file's name and data date, and apply this to my whole data set?
To make it clearer: I'd like to keep the lines of "the latest csv", and the latest csv corresponds to the file's name, which in this example is 20150701. But when it comes to my whole data set, I need to handle so many 20xxxxxx.csv files that I can't check them one by one.
For the example I made, this should end up like this:
20150701-151654.csv:20150630,CFIIASU,233,96.21450,0.44294
20150701-151654.csv:20150630,CFIIASU_AU,65,90.71109,0.28569
20150701-151654.csv:20150630,CFIIASU_CN,68,102.16538,0.07723
20150701-151654.csv:20150630,CFIIASU_ID,37,98.02484,0.27775
20150701-151654.csv:20150630,CFIIASU_KR,39,98.42257,0.83055
20150701-151654.csv:20150630,CFIIASU_TH,24,99.94482,0.20743
Thanks in advance.
A:
One way to do this would be to use the Unix command "awk" to filter the data based on the file name and date. Here's an example of how this could be done:
First, use the "awk" command to filter the data by the file name "20150701-151654.csv" and the date "20150630":
awk -F "," '$1 == "20150701-151654.csv" && $2 == "20150630"'
This will return only the lines that match the specified file name and date.
Next, you can use the "sort" command to sort the data by the dataname field, so that all the lines with the same dataname are grouped together:
awk -F "," '$1 == "20150701-151654.csv" && $2 == "20150630"' | sort -t "," -k3
Finally, you can use the "uniq" command to remove duplicate lines based on the dataname field, so that only the latest data is kept:
awk -F "," '$1 == "20150701-151654.csv" && $2 == "20150630"' | sort -t "," -k3 | uniq -f2
This should give you the desired output of only the latest data from the file "20150701-151654.csv" with the date "20150630". You can then apply this command to your entire data set to get the latest data for all dates and files.
A:
Your question isn't clear but it sounds like this might be what you're trying to do (print all lines from the last csv mentioned in the input file):
$ tac file | awk -F':' 'NR>1 && $1!=prev{exit} {print; prev=$1}' | tac
20150701-151654.csv:20150630,CFIIASU,233,96.21450,0.44294
20150701-151654.csv:20150630,CFIIASU_AU,65,90.71109,0.28569
20150701-151654.csv:20150630,CFIIASU_CN,68,102.16538,0.07723
20150701-151654.csv:20150630,CFIIASU_ID,37,98.02484,0.27775
20150701-151654.csv:20150630,CFIIASU_KR,39,98.42257,0.83055
20150701-151654.csv:20150630,CFIIASU_TH,24,99.94482,0.20743
or maybe this (print the last line seen for every 20150630,CFIIASU etc. pair in the input file):
$ tac file | awk -F'[:,]' '!seen[$2,$3]++' | tac
20150701-151654.csv:20150630,CFIIASU,233,96.21450,0.44294
20150701-151654.csv:20150630,CFIIASU_AU,65,90.71109,0.28569
20150701-151654.csv:20150630,CFIIASU_CN,68,102.16538,0.07723
20150701-151654.csv:20150630,CFIIASU_ID,37,98.02484,0.27775
20150701-151654.csv:20150630,CFIIASU_KR,39,98.42257,0.83055
20150701-151654.csv:20150630,CFIIASU_TH,24,99.94482,0.20743
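If the whole data set has to be reduced in one pass (keep, for every date/dataname pair, only the line coming from the newest 20xxxxxx-hhmmss.csv), here is a rough Python sketch of the same idea. It only assumes the grep-style filename:line format shown above; the script name keep_latest.py is made up for illustration.
#!/usr/bin/env python3
# keep_latest.py - pipe the grep output in, e.g.:  grep ... *.csv | python3 keep_latest.py
import sys

latest = {}  # (date, dataname) -> (filename, full line)
for line in sys.stdin:
    line = line.rstrip("\n")
    if ":" not in line:
        continue
    fname, rest = line.split(":", 1)       # "20150701-151654.csv", "20150630,CFIIASU,..."
    date, dataname = rest.split(",")[:2]
    key = (date, dataname)
    # Filenames start with YYYYMMDD-HHMMSS, so plain string comparison picks the newest file.
    if key not in latest or fname > latest[key][0]:
        latest[key] = (fname, line)

for fname, line in latest.values():
    print(line)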
| How to display the latest line based on the file's name or the line's position in bash | I have a tricky question about how to keep only the latest log data, as my server reposted it twice.
This is the result after I grep from my folder (I have tons of data; this is just to keep the example simple):
...
20150630-201427.csv:20150630,CFIIASU,233,96.21786,0.44644,
20150630-201427.csv:20150630,CFIIASU_AU,65,90.71109,0.28569
20150630-201427.csv:20150630,CFIIASU_CN,68,102.19569,0.10692
20150630-201427.csv:20150630,CFIIASU_ID,37,98.02484,0.27775
20150630-201427.csv:20150630,CFIIASU_KR,39,98.42257,0.83055
20150630-201427.csv:20150630,CFIIASU_TH,24,99.94482,0.20743
20150701-151654.csv:20150630,CFIIASU,233,96.21450,0.44294
20150701-151654.csv:20150630,CFIIASU_AU,65,90.71109,0.28569
20150701-151654.csv:20150630,CFIIASU_CN,68,102.16538,0.07723
20150701-151654.csv:20150630,CFIIASU_ID,37,98.02484,0.27775
20150701-151654.csv:20150630,CFIIASU_KR,39,98.42257,0.83055
20150701-151654.csv:20150630,CFIIASU_TH,24,99.94482,0.20743
...
The data actually comes from many csv files; I only picked two csv files to make the example. Here are some explanations:
the example comes from two files, 20150630-201427.csv and 20150701-151654.csv, and each line has five columns which correspond to date, dataname, data_column1, data_column2, data_column3.
these lines have the same data date 20150630 and the same dataname (CFIIASU, CFIIASU_AU, etc.), but the numbers in the fourth and fifth columns (data_column2 and data_column3) are different.
How could I keep the data of 20150701-151654.csv based on the file's name and data date, and apply this to my whole data set?
To make it clearer: I'd like to keep the lines of "the latest csv", and the latest csv corresponds to the file's name, which in this example is 20150701. But when it comes to my whole data set, I need to handle so many 20xxxxxx.csv files that I can't check them one by one.
For the example I made, this should end up like this:
20150701-151654.csv:20150630,CFIIASU,233,96.21450,0.44294
20150701-151654.csv:20150630,CFIIASU_AU,65,90.71109,0.28569
20150701-151654.csv:20150630,CFIIASU_CN,68,102.16538,0.07723
20150701-151654.csv:20150630,CFIIASU_ID,37,98.02484,0.27775
20150701-151654.csv:20150630,CFIIASU_KR,39,98.42257,0.83055
20150701-151654.csv:20150630,CFIIASU_TH,24,99.94482,0.20743
Thanks in advance.
| [
"One way to do this would be to use the Unix command \"awk\" to filter the data based on the file name and date. Here's an example of how this could be done:\nFirst, use the \"awk\" command to filter the data by the file name \"20150701-151654.csv\" and the date \"20150630\":\nawk -F \",\" '$1 == \"20150701-151654.csv\" && $2 == \"20150630\"'\n\nThis will return only the lines that match the specified file name and date.\nNext, you can use the \"sort\" command to sort the data by the dataname field, so that all the lines with the same dataname are grouped together:\nawk -F \",\" '$1 == \"20150701-151654.csv\" && $2 == \"20150630\"' | sort -t \",\" -k3\n\nFinally, you can use the \"uniq\" command to remove duplicate lines based on the dataname field, so that only the latest data is kept:\nawk -F \",\" '$1 == \"20150701-151654.csv\" && $2 == \"20150630\"' | sort -t \",\" -k3 | uniq -f2\n\nThis should give you the desired output of only the latest data from the file \"20150701-151654.csv\" with the date \"20150630\". You can then apply this command to your entire data set to get the latest data for all dates and files.\n",
"Your question isn't clear but it sounds like this might be what you're trying to do (print all lines from the last csv mentioned in the input file):\n$ tac file | awk -F':' 'NR>1 && $1!=prev{exit} {print; prev=$1}' | tac\n20150701-151654.csv:20150630,CFIIASU,233,96.21450,0.44294\n20150701-151654.csv:20150630,CFIIASU_AU,65,90.71109,0.28569\n20150701-151654.csv:20150630,CFIIASU_CN,68,102.16538,0.07723\n20150701-151654.csv:20150630,CFIIASU_ID,37,98.02484,0.27775\n20150701-151654.csv:20150630,CFIIASU_KR,39,98.42257,0.83055\n20150701-151654.csv:20150630,CFIIASU_TH,24,99.94482,0.20743\n\nor maybe this (print the last line seen for every 20150630,CFIIASU etc. pair in the input file):\n$ tac file | awk -F'[:,]' '!seen[$2,$3]++' | tac\n20150701-151654.csv:20150630,CFIIASU,233,96.21450,0.44294\n20150701-151654.csv:20150630,CFIIASU_AU,65,90.71109,0.28569\n20150701-151654.csv:20150630,CFIIASU_CN,68,102.16538,0.07723\n20150701-151654.csv:20150630,CFIIASU_ID,37,98.02484,0.27775\n20150701-151654.csv:20150630,CFIIASU_KR,39,98.42257,0.83055\n20150701-151654.csv:20150630,CFIIASU_TH,24,99.94482,0.20743\n\n"
] | [
1,
1
] | [] | [] | [
"bash"
] | stackoverflow_0074674237_bash.txt |
Q:
ASP.NET C# Controller - How to store Application Objects using OnResultExecuted method?
I am constructing a simple login system. In my global.asax file, I created a method "OnResultExecuted" which is called after the action result executes. My idea is to store the username and password in an Application Object after the action result executes but I am not sure where to call it.
global.asax
public class BaseController : Controller
{
private string username, password;
public BaseController(string username, string password)
{
this.username = username;
this.password= password;
}
protected override void OnResultExecuted(ResultExecutedContext context)
{
// Generate an Application Object
base.OnResultExecuted(context);
}
}
In my LoginController.cs file, I want to create the application object upon a successful request.
LoginController.cs
public class LoginController : BaseController
{
public ActionResult Login()
{
return View();
}
[HttpPost]
public ActionResult LoginPOST(string username, string password)
{
// Is this how Application Objects are stored?
// How do I call OnResultExecuted in this ActionResult?
Application["username"] = username;
Application["password"] = password;
return View("Login");
}
}
My Issues
I am unsure where to call the "OnResultExecuted" method in LoginController.cs.
Is this the correct way to create an Application Object to store the user's data? If not, please help!
I am getting an error on LoginController (from public class LoginController : BaseController) which states "There is no argument given that corresponds to the required parameter "username" of BaseController.BaseController(string, string)". I have searched for answers online but none seem to work for my case.
How do I access the Application Object after it is stored?
What can I try next?
A:
It looks like you are trying to store the username and password in an Application Object. You can do this by calling OnResultExecuted() in your Login Controller's POST action as follows:
[HttpPost]
public ActionResult LoginPOST(string username, string password)
{
Application["username"] = username;
Application["password"] = password;
OnResultExecuted(null);
return View("Login");
}
To access the Application Object after it is stored, you can use the Application["key"] syntax. For example, you can retrieve the username by using Application["username"].
I hope this helps!
| ASP.NET C# Controller - How to store Application Objects using OnResultExecuted method? | I am constructing a simple login system. In my global.asax file, I created a method "OnResultExecuted" which is called after the action result executes. My idea is to store the username and password in an Application Object after the action result executes but I am not sure where to call it.
global.asax
public class BaseController : Controller
{
private string username, password;
public BaseController(string username, string password)
{
this.username = username;
this.password= password;
}
protected override void OnResultExecuted(ResultExecutedContext context)
{
// Generate an Application Object
base.OnResultExecuted(context);
}
}
In my LoginController.cs file, I want to create the application object upon a successful request.
LoginController.cs
public class LoginController : BaseController
{
public ActionResult Login()
{
return View();
}
[HttpPost]
public ActionResult LoginPOST(string username, string password)
{
// Is this how Application Objects are stored?
// How do I call OnResultExecuted in this ActionResult?
Application["username"] = username;
Application["password"] = password;
return View("Login");
}
}
My Issues
I am unsure where to call the "OnResultExecuted" method in LoginController.cs.
Is this the correct way to create an Application Object to store the user's data? If not, please help!
I am getting an error on LoginController (from public class LoginController : BaseController) which states "There is no argument given that corresponds to the required parameter "username" of BaseController.BaseController(string, string)". I have searched for answers online but none seem to work for my case.
How do I access the Application Object after it is stored?
What can I try next?
| [
"It looks like you are trying to store the username and password in an Application Object. You can do this by calling OnResultExecuted() in your Login Controller's POST action as follows:\n[HttpPost]\npublic ActionResult LoginPOST(string username, string password)\n{\n Application[\"username\"] = username;\n Application[\"password\"] = password;\n OnResultExecuted(null);\n return View(\"Login\");\n}\n\nTo access the Application Object after it is stored, you can use the Application[\"key\"] syntax. For example, you can retrieve the username by using Application[\"username\"].\nI hope this helps!\n"
] | [
0
] | [] | [] | [
"asp.net",
"c#",
"model_view_controller"
] | stackoverflow_0074555500_asp.net_c#_model_view_controller.txt |
Q:
Why fetch() does not exit in for loop?
I use fetch() to print labels. There are some scenarios when I need to loop through the data and print multiple labels. The printing is working fine, but after the first loop the script stops running.
I use an async function in order to slow down the printing.
async function printing() {
for (let w = 0; w < print_labels.length; w++) {
const label = `data of the label`
fetch('http://111.111.1.111:9100', {
method: 'POST',
headers: {
'Content-Type': 'application/x-www-form-urlencoded'
},
body: label
})
console.log(`${w}. label printed.`)
await sleep(2000);
}
};
printing()
How can I cancel/stop the fetch() function? Or is there maybe another solution for how to use a fetch() POST in a for loop?
A:
You can use the AbortController and AbortSignal objects. These objects provide a way to abort an ongoing fetch() call, allowing you to cancel the request and stop it from running.
// Create a new AbortController and AbortSignal
const controller = new AbortController();
const signal = controller.signal;
// Use the AbortSignal in the fetch() call
fetch('http://111.111.1.111:9100', {
method: 'POST',
headers: {
'Content-Type': 'application/x-www-form-urlencoded'
},
body: label,
signal: signal
});
// Use the AbortController to cancel the fetch() call
controller.abort();
Another way is to abort the fetch() call after each iteration of the for loop.
async function printing() {
for (let w = 0; w < print_labels.length; w++) {
// Create a new AbortController and AbortSignal
const controller = new AbortController();
const signal = controller.signal;
// Use the AbortSignal in the fetch() call
fetch('http://111.111.1.111:9100', {
method: 'POST',
headers: {
'Content-Type': 'application/x-www-form-urlencoded'
},
body: label,
signal: signal
});
console.log(`${w}. label printed.`)
await sleep(2000);
// Use the AbortController to cancel the fetch() call
controller.abort();
}
};
printing()
A:
AbortController needs to be used to abort requests, as another answer mentions. Both a failed and an aborted fetch will throw errors, and these errors need to be handled correctly so that they don't cause unhandled exceptions.
Generally the request promise needs to be awaited. In this case Promise.race can be used to handle both the request and the delay promises:
let controller = new AbortController();
try {
await Promise.race([
fetch(..., { ... , signal: controller.signal }),
new Promise(resolve => setTimeout(() => {
controller.abort();
resolve();
}, 2000))
]);
} catch (err) {
console.error('Failed');
}
An aborted request causes an error that can be detected with an err.name === 'AbortError' check and suppressed if necessary, but in this case this may not be needed, because the delay promise resolves first on timeout and makes Promise.race resolve with undefined, with the AbortError being suppressed.
| Why fetch() does not exit in for loop? | I use fetch() to print labels. There are some scenarios when I need to loop through the data and print multiple labels. The printing is working fine, but after the first loop the script stops running.
I use an async function in order to slow down the printing.
async function printing() {
for (let w = 0; w < print_labels.length; w++) {
const label = `data of the label`
fetch('http://111.111.1.111:9100', {
method: 'POST',
headers: {
'Content-Type': 'application/x-www-form-urlencoded'
},
body: label
})
console.log(`${w}. label printed.`)
await sleep(2000);
}
};
printing()
How can I cancel/stop the fetch() function? Or is there maybe another solution for how to use a fetch() POST in a for loop?
| [
"you can use the AbortController and AbortSignal objects. These objects provide a way to abort an ongoing fetch() call, allowing you to cancel the request and stop it from running.\n// Create a new AbortController and AbortSignal\nconst controller = new AbortController();\nconst signal = controller.signal;\n\n// Use the AbortSignal in the fetch() call\nfetch('http://111.111.1.111:9100', {\n method: 'POST',\n headers: {\n 'Content-Type': 'application/x-www-form-urlencoded'\n },\n body: label,\n signal: signal\n});\n\n// Use the AbortController to cancel the fetch() call\ncontroller.abort();\n\nAnother way is using for loop to stop the fetch() call after each iteration.\nasync function printing() {\n for (let w = 0; w < print_labels.length; w++) {\n // Create a new AbortController and AbortSignal\n const controller = new AbortController();\n const signal = controller.signal;\n\n // Use the AbortSignal in the fetch() call\n fetch('http://111.111.1.111:9100', {\n method: 'POST',\n headers: {\n 'Content-Type': 'application/x-www-form-urlencoded'\n },\n body: label,\n signal: signal\n });\n\n console.log(`${w}. label printed.`)\n await sleep(2000);\n\n // Use the AbortController to cancel the fetch() call\n controller.abort();\n }\n}; \n\nprinting()\n\n",
"AbortController needs to be used to abort requests, as another answer mentions. Both failed and aborted fetch will throw errors, and errors need to be correctly handled in order to not cause exceptions.\nGenerally request promise needs to be awaited. In this case race can be used to handle both request and delay promises:\nlet controller = new AbortController();\n\ntry {\n await Promise.race([\n fetch(..., { ... , signal: controller.signal }),\n new Promise(resolve => {\n setTimeout(resolve);\n controller.abort();\n }, 2000);\n ]));\n} catch (err) {\n console.error('Failed');\n}\n\nAborted request causes an error that can be detected with err.name === 'AbortError' check and suppressed if necessary, but in this case this may not be needed because delay promise resolves first on timeout and makes Promise.race resolve with undefined with AbortError being suppressed.\n"
] | [
0,
0
] | [] | [] | [
"fetch",
"for_loop",
"node.js"
] | stackoverflow_0074674604_fetch_for_loop_node.js.txt |
Q:
How to use same proxy until specific response status?
I have currently written code that has multiple threads (as an example I used 50 threads), and a proxy is only allowed to be in one of these threads at a time (meaning that 1 proxy cannot be in two threads).
import contextlib
import random
import threading
import time
import requests
my_proxies = [
'http://140.99.107.100:2100',
'http://140.99.107.101:2100',
'http://140.99.107.102:2100',
'http://140.99.107.103:2100',
'http://140.99.107.104:2100',
'http://140.99.107.105:2100',
'http://140.99.107.106:2100',
'http://140.99.107.107:2100',
'http://140.99.107.108:2100',
'http://140.99.107.109:2100',
'http://140.99.107.110:2100',
'http://140.99.107.111:2100',
'http://140.99.107.112:2100',
'http://140.99.107.113:2100',
'http://140.99.107.114:2100',
'http://140.99.107.115:2100',
'http://140.99.107.116:2100',
'http://140.99.107.117:2100',
'http://140.99.107.118:2100',
'http://140.99.107.119:2100',
'http://140.99.107.120:2100',
'http://140.99.107.121:2100',
'http://140.99.107.122:2100',
]
# --------------------------------------------------------------------------- #
class AvailableProxiesManager:
_proxy_lock: threading.Lock = threading.Lock()
def __init__(self):
self._proxy_dict = dict.fromkeys(my_proxies, True)
@property
@contextlib.contextmanager
def proxies(self):
"""
Context manager that yields a random proxy from the list of available proxies.
:return: dict[str, str] - A random proxy.
"""
proxy = None
with self._proxy_lock:
while not proxy:
if available := [att for att, value in self._proxy_dict.items() if value]:
proxy = random.choice(available)
self._proxy_dict[proxy] = False
else:
print('Waiting ... no proxies available')
time.sleep(.2)
yield proxy
self._proxy_dict[proxy] = True # Return the proxy to the list of available proxies
# --------------------------------------------------------------------------- #
available_proxies = AvailableProxiesManager()
def main():
while True:
with available_proxies.proxies as proxy:
response = requests.get('https://httpbin.org/ip', proxies={'https': proxy})
if response.status_code == 403:
print('Lets put proxy on cooldown for 10 minutes and try with new one!')
time.sleep(120)
if __name__ == '__main__':
threads = []
for i in range(50):
t = threading.Thread(target=main)
threads.append(t)
t.start()
time.sleep(1)
However, my problem is that currently every iteration of the while True loop uses a new random proxy. What I am trying to achieve instead is that the same proxy keeps being used in the same thread until the response status is 403. That means that if thread-1 gets the proxy http://140.99.107.100:2100 in the beginning, then it should be used in thread-1 until it gets a 403.
My question is: how can I make the same proxy be used until it hits a 403 response?
Expected:
Proxy to be the same until 403
Actual:
New proxy for every GET request
A:
What if you stop using a context manager,
(remove @contextlib.contextmanager)
and do something like this:
def main():
proxy = next(available_proxies.proxies)
while True:
response = requests.get('https://httpbin.org/ip', proxies={'https': proxy})
if response.status_code == 403:
proxy = next(available_proxies.proxies)
time.sleep(120)
Hope that helps, good luck !
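A rough sketch of that idea with an explicit checkout/return pool, so each thread keeps its proxy across requests and only swaps it after a 403. This is not the original AvailableProxiesManager; the ProxyPool, acquire, release and worker names and the 600-second cooldown are made up for illustration:
import threading
import time

import requests

class ProxyPool:
    """Minimal thread-safe pool: a proxy is held by exactly one thread until it is returned."""
    def __init__(self, proxies):
        self._free = list(proxies)
        self._ready = threading.Condition()

    def acquire(self):
        with self._ready:
            while not self._free:
                self._ready.wait()             # block until some proxy is returned
            return self._free.pop()

    def release(self, proxy, cooldown=0):
        def _put_back():
            time.sleep(cooldown)               # keep the proxy out while it cools down
            with self._ready:
                self._free.append(proxy)
                self._ready.notify()
        threading.Thread(target=_put_back, daemon=True).start()

def worker(pool):
    proxy = pool.acquire()                     # this thread keeps the same proxy...
    while True:
        response = requests.get('https://httpbin.org/ip', proxies={'https': proxy})
        if response.status_code == 403:
            pool.release(proxy, cooldown=600)  # ...until it gets a 403
            proxy = pool.acquire()

# usage sketch:
# pool = ProxyPool(my_proxies)
# for _ in range(50):
#     threading.Thread(target=worker, daemon=True, args=(pool,)).start()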
| How to use same proxy until specific response status? | I have currently written code that has multiple threads (as an example I used 50 threads), and a proxy is only allowed to be in one of these threads at a time (meaning that 1 proxy cannot be in two threads).
import contextlib
import random
import threading
import time
import requests
my_proxies = [
'http://140.99.107.100:2100',
'http://140.99.107.101:2100',
'http://140.99.107.102:2100',
'http://140.99.107.103:2100',
'http://140.99.107.104:2100',
'http://140.99.107.105:2100',
'http://140.99.107.106:2100',
'http://140.99.107.107:2100',
'http://140.99.107.108:2100',
'http://140.99.107.109:2100',
'http://140.99.107.110:2100',
'http://140.99.107.111:2100',
'http://140.99.107.112:2100',
'http://140.99.107.113:2100',
'http://140.99.107.114:2100',
'http://140.99.107.115:2100',
'http://140.99.107.116:2100',
'http://140.99.107.117:2100',
'http://140.99.107.118:2100',
'http://140.99.107.119:2100',
'http://140.99.107.120:2100',
'http://140.99.107.121:2100',
'http://140.99.107.122:2100',
]
# --------------------------------------------------------------------------- #
class AvailableProxiesManager:
_proxy_lock: threading.Lock = threading.Lock()
def __init__(self):
self._proxy_dict = dict.fromkeys(my_proxies, True)
@property
@contextlib.contextmanager
def proxies(self):
"""
Context manager that yields a random proxy from the list of available proxies.
:return: dict[str, str] - A random proxy.
"""
proxy = None
with self._proxy_lock:
while not proxy:
if available := [att for att, value in self._proxy_dict.items() if value]:
proxy = random.choice(available)
self._proxy_dict[proxy] = False
else:
print('Waiting ... no proxies available')
time.sleep(.2)
yield proxy
self._proxy_dict[proxy] = True # Return the proxy to the list of available proxies
# --------------------------------------------------------------------------- #
available_proxies = AvailableProxiesManager()
def main():
while True:
with available_proxies.proxies as proxy:
response = requests.get('https://httpbin.org/ip', proxies={'https': proxy})
if response.status_code == 403:
print('Lets put proxy on cooldown for 10 minutes and try with new one!')
time.sleep(120)
if __name__ == '__main__':
threads = []
for i in range(50):
t = threading.Thread(target=main)
threads.append(t)
t.start()
time.sleep(1)
However, my problem is that currently every iteration of the while True loop uses a new random proxy. What I am trying to achieve instead is that the same proxy keeps being used in the same thread until the response status is 403. That means that if thread-1 gets the proxy http://140.99.107.100:2100 in the beginning, then it should be used in thread-1 until it gets a 403.
My question is: how can I make the same proxy be used until it hits a 403 response?
Expected:
Proxy to be the same until 403
Actual:
New proxy for every GET request
| [
"What if you stop using a context manager,\n(remove @contextlib.contextmanager)\nand do something like this:\ndef main():\n proxy = next(available_proxies.proxies)\n while True:\n response = requests.get('https://httpbin.org/ip', proxies={'https': proxy})\n if response.status_code == 403:\n proxy = next(available_proxies.proxies)\n\n time.sleep(120)\n\nHope that helps, good luck !\n"
] | [
0
] | [] | [] | [
"dictionary",
"list",
"multithreading",
"python_3.x",
"python_requests"
] | stackoverflow_0074674686_dictionary_list_multithreading_python_3.x_python_requests.txt |
Q:
how to import a module that using another module at the grandparent directories in Python
I'm trying to run a Python file that imports a module which uses other modules from the grandparent directory. The file structure is:
directory_0
|
directory_1
| |
| directory_2
| |
| __init__.py (define the method A and import another method B from file_2.py)
| |
| file_1.py
|
directory_3
|
file_2.py (define the method B)
I want to run file_1.py, which imports method A defined in __init__.py, and __init__.py in turn imports method B from file_2.py.
I'm currently in /directory_0/directory_1/directory_2 and run the command python file_1.py. It throws ModuleNotFoundError: No module named directory_3.file_2
How do I make it run, and from which path should I run this script (file_1.py)?
A:
You probably need an __init__.py in all of directories 1-3.
Try to use the following syntax:
import ...directory_3.file_2
A:
My final approach is to import it this way:
in file_1.py:
from directory_1.directory_2 import A
and go to a path from which all child modules can be reached, then run file_1.py as a module:
from /directory_0, run python -m directory_1.directory_2.file_1
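For completeness, a rough sketch of the other common workaround: let file_1.py put directory_0 on sys.path itself, so it can still be started directly with python file_1.py. The paths come from the layout above; the call to A() at the bottom is only illustrative, since A and B are whatever the package actually defines.
# file_1.py - minimal sketch, assuming the directory layout shown above
import sys
from pathlib import Path

# parents[2] of .../directory_0/directory_1/directory_2/file_1.py is directory_0,
# so absolute imports such as directory_1... and directory_3... can be resolved.
sys.path.insert(0, str(Path(__file__).resolve().parents[2]))

from directory_1.directory_2 import A   # method A from __init__.py
from directory_3.file_2 import B        # method B from file_2.py

if __name__ == "__main__":
    A()  # illustrative only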
| how to import a module that using another module at the grandparent directories in Python | I'm trying to run a Python file that imports a module which uses other modules from the grandparent directory. The file structure is:
directory_0
|
directory_1
| |
| directory_2
| |
| __init__.py (define the method A and import another method B from file_2.py)
| |
| file_1.py
|
directory_3
|
file_2.py (define the method B)
I want to run file_1.py, which imports method A defined in __init__.py, and __init__.py in turn imports method B from file_2.py.
I'm currently in /directory_0/directory_1/directory_2 and run the command python file_1.py. It throws ModuleNotFoundError: No module named directory_3.file_2
How do I make it run, and from which path should I run this script (file_1.py)?
| [
"Probably you need __init__.py in all 1-3 directories.\nTry to use next syntaxis\nimport ...directory_3.file_2\n",
"my final approach is import in the way:\nin file_1.py:\nfrom directory_1.directory_2 import A\n\nand jump to the path that can access all children modules to run file_1.py as a module:\nfrom /directory_0 run by python -m directory_1.directory_2.file_1\n"
] | [
0,
0
] | [] | [] | [
"init",
"python",
"python_3.x",
"python_import",
"relative_import"
] | stackoverflow_0074674599_init_python_python_3.x_python_import_relative_import.txt |
Q:
How can I test stopPropagation of a click event using React Testing Library?
I have a simple Icon component that accepts an onClick() prop, which is called when the icon is clicked. Additionally, each time the icon is clicked another function, event.stopPropagation(), is called. This function is a property of the actual click event fired by the icon (which renders a basic span).
Now I want to check two things:
The onClick prop function should be called.
The stopPropagation callback passed via the event should be called.
Previously I was using enzyme to test which worked perfectly fine.
test('Icon should call the callback on when space is pressed', () => {
const onClick = jest.fn();
const stopPropagation = jest.fn();
const icon = shallow(<Icon className="test" name="su-pen" onClick={onClick} />);
icon.simulate('keypress', {key: ' ', stopPropagation});
expect(onClick).toBeCalled();
expect(stopPropagation).toBeCalled();
});
Now I want to migrate this to React Testing Library. I have tried it with fireEvent but stopPropagation() doesn't get called.
test('Icon should call the callback on click', () => {
const onClick = jest.fn();
const stopPropagation = jest.fn();
render(<Icon className="test" name="su-pen" onClick={onClick} />);
const icon = screen.queryByLabelText('su-pen');
fireEvent.click(icon, {stopPropagation});
expect(onClick).toBeCalled();
expect(stopPropagation).toBeCalled();
// ^ --> failed
// Expected number of calls: >= 1
// Received number of calls: 0
});
A:
You are testing the internals of the component this way, not its behavior.
I would rather wrap that in a dummy element with an onclick handler, and check that that is not called when clicking the icon:
test('Icon should not propagate the click event', () => {
const onClick = jest.fn();
const onOuterClick = jest.fn();
render(
<div onClick={onOuterClick}>
<Icon className="test" name="su-pen" onClick={onClick} />
</div>
);
const icon = screen.queryByLabelText('su-pen');
fireEvent.click(icon);
expect(onClick).toHaveBeenCalledTimes(1);
expect(onOuterClick).toHaveBeenCalledTimes(0);
});
| How can I test stopPropagation of a click event using React Testing Library? | I have a simple Icon component that accepts an onClick() prop, which is called when the icon is clicked. Additionally, each time the icon is clicked another function, event.stopPropagation(), is called. This function is a property of the actual click event fired by the icon (which renders a basic span).
Now I want to check two things:
The onClick prop function should be called.
The stopPropagation callback passed via the event should be called.
Previously I was using enzyme to test which worked perfectly fine.
test('Icon should call the callback on when space is pressed', () => {
const onClick = jest.fn();
const stopPropagation = jest.fn();
const icon = shallow(<Icon className="test" name="su-pen" onClick={onClick} />);
icon.simulate('keypress', {key: ' ', stopPropagation});
expect(onClick).toBeCalled();
expect(stopPropagation).toBeCalled();
});
Now I want to migrate this to React Testing Library. I have tried it with fireEvent but stopPropagation() doesn't get called.
test('Icon should call the callback on click', () => {
const onClick = jest.fn();
const stopPropagation = jest.fn();
render(<Icon className="test" name="su-pen" onClick={onClick} />);
const icon = screen.queryByLabelText('su-pen');
fireEvent.click(icon, {stopPropagation});
expect(onClick).toBeCalled();
expect(stopPropagation).toBeCalled();
// ^ --> failed
// Expected number of calls: >= 1
// Received number of calls: 0
});
| [
"You are testing the internals of the component this way, not its behavior.\nI would rather wrap that in a dummy element with an onclick handler, and check that that is not called when clicking the icon:\ntest('Icon should not propagate the click event', () => {\n const onClick = jest.fn();\n const onOuterClick = jest.fn();\n\n render(\n <div onClick={onOuterClick}>\n <Icon className=\"test\" name=\"su-pen\" onClick={onClick} />\n </div>\n );\n const icon = screen.queryByLabelText('su-pen');\n fireEvent.click(icon);\n \n expect(onClick).toHaveBeenCalledTimes(1);\n expect(onOuterClick).toHaveBeenCalledTimes(0);\n});\n\n\n"
] | [
0
] | [] | [] | [
"enzyme",
"javascript",
"jestjs",
"react_testing_library",
"reactjs"
] | stackoverflow_0073192216_enzyme_javascript_jestjs_react_testing_library_reactjs.txt |
Q:
Game Loop makes GUI "blinking"
I have a Game Loop:
public void startGameThread() {
gameLoop();
gameThread = new Thread(this);
gameThread.start();
}
public void gameLoop() {
frame++;
if (System.currentTimeMillis() - lastCheck >= 1000) {
lastCheck = System.currentTimeMillis();
System.out.println("FPS " + frame);
frame = 0;
}
}
@Override
public void run() {
double timePerFrame = 1000000000.0/FPS;
long lastFrame = System.nanoTime();
long now = System.nanoTime();
while (true) {
now = System.nanoTime();
if (System.nanoTime() - lastFrame >= timePerFrame) {
repaint();
update();
gameLoop();
lastFrame = now;
}
}
}
and Main Class with GUI:
package MainPackage;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import javax.swing.JButton;
import javax.swing.JFrame;
public class MainClass implements ActionListener {
public static boolean Clicked = false;
public static void main(String[] args) {
PanelClass pClass = new PanelClass();
JButton start = new JButton("START");
start.setSize(120, 50);
start.setFocusable(false);
start.setLocation(630, 200);
start.addActionListener(new ActionListener() {
public void actionPerformed(ActionEvent e) {
if(e.getSource() == start) {
pClass.startGameThread();
Clicked = true;
start.setVisible(false);
}
}
});
start.setVisible(true);
JFrame frame = new JFrame("Tanks");
frame.setSize(1600, 913);
frame.setLayout(null);
frame.pack();
frame.setExtendedState(JFrame.MAXIMIZED_BOTH);
frame.add(start);
frame.add(pClass);
frame.setVisible(true);
}
@Override
public void actionPerformed(ActionEvent e) {
}
}
After launching my application, the GUI is "blinking". I think that's because of the game loop. But how do I fix it? Any help is really appreciated!
I was expecting to get the usual behavior of the GUI. Ask me to add more details or code if needed.
I'm typing this because Stack Overflow needs more "details". That's probably stupid, but I don't know what details to add.
A:
There is nothing wrong with your game loop. The way you are creating the JFrame is wrong.
Mistake 1:
Do not use JFrame.setSize(); use JFrame.setPreferredSize() instead.
Mistake 2:
Always call JFrame.pack() after adding all the components and before making the frame visible.
Mistake 3:
Do not use a null layout for the JFrame.
After fixing these mistakes, your game was running without any problems.
PanelClass pClass = new PanelClass();
JButton start = new JButton("START");
start.setSize(120, 50);
start.setFocusable(false);
start.setLocation(630, 200);
start.addActionListener(new ActionListener() {
public void actionPerformed(ActionEvent e) {
if(e.getSource() == start) {
pClass.startGameThread();
Clicked = true;
start.setVisible(false);
}
}
});
start.setVisible(true);
JFrame frame = new JFrame("Tanks");
frame.setPreferredSize(new Dimension(1200, 913));
frame.setExtendedState(JFrame.MAXIMIZED_BOTH);
frame.add(start);
frame.add(pClass);
frame.setVisible(true);
frame.pack();
Complete code:
import java.awt.*;
import java.awt.event.*;
import javax.swing.*;
public class Game implements ActionListener {
public static boolean Clicked = false;
public static void main(String[] args) {
PanelClass pClass = new PanelClass();
JButton start = new JButton("START");
start.setSize(120, 50);
start.setFocusable(false);
start.setLocation(630, 200);
start.addActionListener(e -> {
if(e.getSource() == start) {
pClass.startGameThread();
Clicked = true;
start.setVisible(false);
}
});
start.setVisible(true);
JFrame frame = new JFrame("Tanks");
frame.setPreferredSize(new Dimension(1200, 913));
frame.setMinimumSize(new Dimension(300, 200));
frame.setExtendedState(JFrame.MAXIMIZED_BOTH);
frame.add(start);
frame.add(pClass);
frame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
frame.setVisible(true);
frame.pack();
}
@Override
public void actionPerformed(ActionEvent e) {
}
static class PanelClass extends JPanel implements Runnable{
int posX = 0;
Thread gameThread;
int frame;
long lastCheck;
int FPS = 60;
public void startGameThread() {
gameLoop();
gameThread = new Thread(this);
gameThread.start();
}
public void gameLoop() {
frame++;
if (System.currentTimeMillis() - lastCheck >= 1000) {
lastCheck = System.currentTimeMillis();
System.out.println("FPS " + frame);
frame = 0;
}
}
@Override
public void run() {
double timePerFrame = 1000000000.0/FPS;
long lastFrame = System.nanoTime();
long now = System.nanoTime();
while (true) {
now = System.nanoTime();
if (System.nanoTime() - lastFrame >= timePerFrame) {
repaint();
gameLoop();
lastFrame = now;
}
}
}
@Override
protected void paintComponent(Graphics g) {
super.paintComponent(g);
posX++;
g.fillRect(posX,getHeight()/2,10,10);
Toolkit.getDefaultToolkit().sync();
}
}
}
The game loop works and there is no blinking/flickering on Ubuntu or Windows 7, compiled with JDK 17.
Bonus tip: If you are overriding the paint method of your PanelClass, then call Toolkit.getDefaultToolkit().sync() at the end. This method ensures that the display is up to date.
| Game Loop makes GUI "blinking" | I have a Game Loop:
public void startGameThread() {
gameLoop();
gameThread = new Thread(this);
gameThread.start();
}
public void gameLoop() {
frame++;
if (System.currentTimeMillis() - lastCheck >= 1000) {
lastCheck = System.currentTimeMillis();
System.out.println("FPS " + frame);
frame = 0;
}
}
@Override
public void run() {
double timePerFrame = 1000000000.0/FPS;
long lastFrame = System.nanoTime();
long now = System.nanoTime();
while (true) {
now = System.nanoTime();
if (System.nanoTime() - lastFrame >= timePerFrame) {
repaint();
update();
gameLoop();
lastFrame = now;
}
}
}
and Main Class with GUI:
package MainPackage;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import javax.swing.JButton;
import javax.swing.JFrame;
public class MainClass implements ActionListener {
public static boolean Clicked = false;
public static void main(String[] args) {
PanelClass pClass = new PanelClass();
JButton start = new JButton("START");
start.setSize(120, 50);
start.setFocusable(false);
start.setLocation(630, 200);
start.addActionListener(new ActionListener() {
public void actionPerformed(ActionEvent e) {
if(e.getSource() == start) {
pClass.startGameThread();
Clicked = true;
start.setVisible(false);
}
}
});
start.setVisible(true);
JFrame frame = new JFrame("Tanks");
frame.setSize(1600, 913);
frame.setLayout(null);
frame.pack();
frame.setExtendedState(JFrame.MAXIMIZED_BOTH);
frame.add(start);
frame.add(pClass);
frame.setVisible(true);
}
@Override
public void actionPerformed(ActionEvent e) {
}
}
After launching my application, the GUI is "blinking". I think that's because of the game loop. But how do I fix it? Any help is really appreciated!
I was expecting to get the usual behavior of the GUI. Ask me to add more details or code if needed.
I'm typing this because Stack Overflow needs more "details". That's probably stupid, but I don't know what details to add.
| [
"There is nothing wrong with your game loop. The way you are creating the JFrame is wrong.\nMistake 1:\nDo not use JFrame.setSize() use JFrame.setPreferredSize() instead.\nMistake 2:\nAlways call JFrame.pack() after adding all the components and before making the frame visible.\nMistake 3:\nDo not use null layout for JFrame.\nAfter fixing these mistakes your game was running without any problems.\nPanelClass pClass = new PanelClass();\nJButton start = new JButton(\"START\");\nstart.setSize(120, 50);\nstart.setFocusable(false);\nstart.setLocation(630, 200);\nstart.addActionListener(new ActionListener() {\n public void actionPerformed(ActionEvent e) {\n if(e.getSource() == start) {\n pClass.startGameThread();\n Clicked = true;\n start.setVisible(false);\n }\n }\n});\nstart.setVisible(true);\nJFrame frame = new JFrame(\"Tanks\");\nframe.setPreferredSize(new Dimension(1200, 913));\nframe.setExtendedState(JFrame.MAXIMIZED_BOTH);\nframe.add(start);\nframe.add(pClass);\nframe.setVisible(true);\nframe.pack();\n\nComplete code:\nimport java.awt.*;\nimport java.awt.event.*;\n\nimport javax.swing.*;\n\npublic class Game implements ActionListener {\n public static boolean Clicked = false;\n\n public static void main(String[] args) {\n PanelClass pClass = new PanelClass();\n JButton start = new JButton(\"START\");\n start.setSize(120, 50);\n start.setFocusable(false);\n start.setLocation(630, 200);\n start.addActionListener(e -> {\n if(e.getSource() == start) {\n pClass.startGameThread();\n Clicked = true;\n start.setVisible(false);\n }\n });\n start.setVisible(true);\n JFrame frame = new JFrame(\"Tanks\");\n frame.setPreferredSize(new Dimension(1200, 913));\n frame.setMinimumSize(new Dimension(300, 200));\n frame.setExtendedState(JFrame.MAXIMIZED_BOTH);\n frame.add(start);\n frame.add(pClass);\n frame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);\n frame.setVisible(true);\n frame.pack();\n }\n\n @Override\n public void actionPerformed(ActionEvent e) {\n\n }\n static class PanelClass extends JPanel implements Runnable{\n int posX = 0;\n Thread gameThread;\n int frame;\n long lastCheck;\n int FPS = 60;\n public void startGameThread() {\n gameLoop();\n gameThread = new Thread(this);\n gameThread.start();\n }\n\n public void gameLoop() {\n frame++;\n if (System.currentTimeMillis() - lastCheck >= 1000) {\n lastCheck = System.currentTimeMillis();\n System.out.println(\"FPS \" + frame);\n frame = 0;\n }\n }\n\n @Override\n public void run() {\n double timePerFrame = 1000000000.0/FPS;\n long lastFrame = System.nanoTime();\n long now = System.nanoTime();\n while (true) {\n now = System.nanoTime();\n if (System.nanoTime() - lastFrame >= timePerFrame) {\n repaint();\n gameLoop();\n lastFrame = now;\n }\n }\n }\n\n @Override\n protected void paintComponent(Graphics g) {\n super.paintComponent(g);\n posX++;\n g.fillRect(posX,getHeight()/2,10,10);\n Toolkit.getDefaultToolkit().sync();\n }\n }\n\n}\n\nThe game loop works and there is no blinking/flickering on ubuntu or windows 7 compiled with JDK 17.\nBonus tip: If you are overriding the paint method of your PanelClass then use Toolkit.getDefaultToolkit().sync() in the end. This method ensures that the display is up-to-date.\n"
] | [
0
] | [] | [] | [
"game_loop",
"java",
"multithreading",
"user_interface"
] | stackoverflow_0074674936_game_loop_java_multithreading_user_interface.txt |
Q:
Does Kotlin have macros?
I have the following piece of code:
class JiraCredentials(applicationContext: Context)
{
private val preferences = applicationContext.getSharedPreferences(
"jira",
ComponentActivity.MODE_PRIVATE
)
private val username_key = "username"
var username: String
get () = preferences.getString (username_key, "").toString()
set (value) {
val editor = preferences.edit()
editor.putString (username_key, value)
editor.commit ()
}
private val password_key = "password"
var password: String
get () = preferences.getString (password_key, "").toString()
set (value) {
val editor = preferences.edit()
editor.putString (password_key, value)
editor.commit ()
}
}
As you can see, the "username" part is almost the same as the "password" part. Other languages (Scheme, Rust) have "hygienic macros" to handle this. What is the idiomatic way to handle this in Kotlin?
A:
In this specific case, you can use property delegates to reduce the duplication.
class PreferenceDelegate(
val key: String,
val preferences: SharedPreferences
) {
operator fun getValue(self: Any?, property: KProperty<*>) =
preferences.getString(key, "").toString()
operator fun setValue(self: Any?, property: KProperty<*>, value: String) {
val editor = preferences.edit()
editor.putString(key, value)
editor.commit()
}
}
Usage:
var username by PreferenceDelegate("username", preferences)
var password by PreferenceDelegate("password", preferences)
If the key is always the same as the property name, then you can remove the key parameter since the key can be retrieved from property.name.
If you are only using this in JiraCredentials, you can remove the preferences parameter, since you can get that from self.preferences.
Here is an example with both parameters removed:
// this can be put inside JiraCredentials so that you can access "preferences"
object PreferenceDelegate {
operator fun getValue(self: JiraCredentials, property: KProperty<*>) =
preferences.getString(property.name, "").toString()
operator fun setValue(self: JiraCredentials, property: KProperty<*>, value: String) {
val editor = self.preferences.edit()
editor.putString(property.name, value)
editor.commit()
}
}
// Usage:
var username by PreferenceDelegate
var password by PreferenceDelegate
A:
I just found out that some people are trying to implement something for Kotlin that has similar functionality. The effort is called KotlinPoet.
| Does Kotlin have macros? | I have the following piece of code:
class JiraCredentials(applicationContext: Context)
{
private val preferences = applicationContext.getSharedPreferences(
"jira",
ComponentActivity.MODE_PRIVATE
)
private val username_key = "username"
var username: String
get () = preferences.getString (username_key, "").toString()
set (value) {
val editor = preferences.edit()
editor.putString (username_key, value)
editor.commit ()
}
private val password_key = "password"
var password: String
get () = preferences.getString (password_key, "").toString()
set (value) {
val editor = preferences.edit()
editor.putString (password_key, value)
editor.commit ()
}
}
As you can see, the "username" part is almost the same as the "password" part. Other languages (Scheme, Rust) have "hygienic macros" to handle this. What is the idiomatic way to handle this in Kotlin?
| [
"In this specific case, you can use property delegates to reduce the duplication.\nclass PreferenceDelegate(\n val key: String,\n val preferences: SharedPreferences\n) {\n operator fun getValue(self: Any?, property: KProperty<*>) =\n preferences.getString(key, \"\").toString()\n\n operator fun setValue(self: Any?, property: KProperty<*>, value: String) {\n val editor = preferences.edit()\n editor.putString(key, value)\n editor.commit()\n }\n}\n\nUsage:\nvar username by PreferenceDelegate(\"username\", preferences)\nvar password by PreferenceDelegate(\"password\", preferences)\n\nIf the key is always the same as the property name, then you can remove the key parameter since the key can be retrieved from property.name.\nIf you are only using this in JiraCredentials, you can remove the preferences parameter, since you can get that from self.preferences.\nHere is an example with both parameters removed:\n// this can be put inside JiraCredentials so that you can access \"preferences\"\nobject PreferenceDelegate {\n operator fun getValue(self: JiraCredentials, property: KProperty<*>) =\n preferences.getString(property.name, \"\").toString()\n\n operator fun setValue(self: JiraCredentials, property: KProperty<*>, value: String) {\n val editor = self.preferences.edit()\n editor.putString(property.name, value)\n editor.commit()\n }\n}\n\n// Usage:\nvar username by PreferenceDelegate\nvar password by PreferenceDelegate\n\n",
"I just found out that some poeple try to implement something for Kotlin, which has a similar functionality. The effort is called KotlinPoet.\n"
] | [
4,
0
] | [] | [] | [
"kotlin",
"macros"
] | stackoverflow_0074363886_kotlin_macros.txt |
Q:
How can I fill horizontal space in flutter?
Column(
children: [
Padding(
padding: const EdgeInsets.only(
left: 15, bottom: 8, top: 1),
child: Row(
//crossAxisAlignment: CrossAxisAlignment.start,
//mainAxisAlignment: MainAxisAlignment.start,
children: [
Column(
crossAxisAlignment: CrossAxisAlignment.start,
children: [
// ITEM NAME
Padding(
padding: const EdgeInsets.only(
left: 10, top: 10),
child: Row(
mainAxisAlignment:
MainAxisAlignment.spaceBetween,
children: [
Text(
'${restaurantItems[i].name}',
style: TextStyle(
fontSize: 17,
color: kTextColor,
fontWeight: FontWeight.w700),
),
SizedBox(
width: width / 2,
),
// 'ADD' BUTTON CONTAINER
Container(
decoration: BoxDecoration(
borderRadius:
BorderRadius.circular(8),
color: Colors.black87,
),
child: Padding(
padding: const EdgeInsets.only(
left: 9,
top: 3,
right: 5,
bottom: 3),
child: InkWell(
splashColor: Colors.white,
onTap: () {
// print(restaurantItems[i].name);
cart.addItem(
restaurantItems[i].id,
restaurantItems[i].name,
restaurantItems[i].price,
restaurant,
);
},
child: Row(
children: [
Text(
'ADD',
style: TextStyle(
color: Colors.white,
),
),
Icon(
Icons.add,
color: Colors.white,
size: 17,
),
],
),
),
),
),
],
),
),
Padding(
padding: const EdgeInsets.only(
left: 10, top: 10, bottom: 11),
child: Text(
'₹${restaurantItems[i].price}',
style: TextStyle(
fontSize: 15,
color: kTextColor,
fontWeight: FontWeight.w500),
),
),
// Padding(
// padding: const EdgeInsets.only(
// left: 17, top: 17),
// child: InkWell(
// onTap: () {
// // Add to Cart
// },
// child: Row(
// children: [
// Padding(
// padding:
// const EdgeInsets.only(left: 15),
// child: Text(
// '${restaurantItems[i].quantity} Left',
// style: TextStyle(
// color: kTextLightColor,
// fontSize: 13,
// fontWeight: FontWeight.w700),
// ),
// )
// ],
// ),
// ),
// )
],
),
],
),
),
],
),
How can I fill the space between 'ITEM NAME' and 'ADD CONTAINER'? I have tried Spacer(), but it doesn't work. Also, I have seen the Expanded() widget, but since I am new to Flutter, I can't seem to get the hang of it. I have also added the Column widget, which is why the Spacer() widget is not working, I guess.
Any help would be appreciated, thanks.
A:
Use a SizedBox widget between 'ITEM NAME' and 'ADD CONTAINER'. For example:
Widget build(BuildContext context) {
double height = MediaQuery.of(context).size.height;
double width = MediaQuery.of(context).size.width;
return Scaffold(
body: SafeArea(
child: Padding(
padding: const EdgeInsets.only(left: 10, top: 10),
child: Row(
mainAxisAlignment: MainAxisAlignment.spaceBetween,
children: [
Text(
'test',
style: TextStyle(
fontSize: 17,
color: Colors.black,
fontWeight: FontWeight.w700),
),
// Spacer()
SizedBox(width: width/2,),
// 'ADD' BUTTON CONTAINER
Container(
decoration: BoxDecoration(
borderRadius: BorderRadius.circular(8),
color: Colors.black87,
),
child: Padding(
padding:
const EdgeInsets.only(left: 9, top: 3, right: 5, bottom: 3),
child: InkWell(
splashColor: Colors.white,
onTap: () {
// print(restaurantItems[i].name);
// cart.addItem(
// restaurantItems[i].id,
// restaurantItems[i].name,
// restaurantItems[i].price,
// restaurant,
// );
},
child: Row(
children: [
Text(
'ADD',
style: TextStyle(
color: Colors.white,
),
),
Icon(
Icons.add,
color: Colors.white,
size: 17,
),
],
),
),
),
),
],
),
),));
}
A:
For providing space between widgets you can use SizedBox(width:10)
A:
You need two additional widgets. The idea:
body: Row(children: [
Expanded(
      child: Column(children: [ //This is your topmost column. Wrap it with Expanded and then with Row.
Row(
mainAxisAlignment: MainAxisAlignment.spaceBetween,
children: const [Text('ITEM NAME'), TextButton(onPressed: () {}, child: Text('ADD'))])
]))
])
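To make the Expanded idea concrete for the row in the question, here is a small sketch only; addButton stands in for the original 'ADD' Container, which is not repeated here. Wrapping the item name in Expanded lets the text claim all remaining width, so the button is pushed to the right edge without a hard-coded SizedBox:
Row(
  children: [
    Expanded(
      child: Text(
        '${restaurantItems[i].name}',
        // long names are ellipsized instead of overflowing the row
        overflow: TextOverflow.ellipsis,
      ),
    ),
    // the existing 'ADD' Container goes here unchanged
    addButton,
  ],
)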
| How can I fill horizontal space in flutter? | Column(
children: [
Padding(
padding: const EdgeInsets.only(
left: 15, bottom: 8, top: 1),
child: Row(
//crossAxisAlignment: CrossAxisAlignment.start,
//mainAxisAlignment: MainAxisAlignment.start,
children: [
Column(
crossAxisAlignment: CrossAxisAlignment.start,
children: [
// ITEM NAME
Padding(
padding: const EdgeInsets.only(
left: 10, top: 10),
child: Row(
mainAxisAlignment:
MainAxisAlignment.spaceBetween,
children: [
Text(
'${restaurantItems[i].name}',
style: TextStyle(
fontSize: 17,
color: kTextColor,
fontWeight: FontWeight.w700),
),
SizedBox(
width: width / 2,
),
// 'ADD' BUTTON CONTAINER
Container(
decoration: BoxDecoration(
borderRadius:
BorderRadius.circular(8),
color: Colors.black87,
),
child: Padding(
padding: const EdgeInsets.only(
left: 9,
top: 3,
right: 5,
bottom: 3),
child: InkWell(
splashColor: Colors.white,
onTap: () {
// print(restaurantItems[i].name);
cart.addItem(
restaurantItems[i].id,
restaurantItems[i].name,
restaurantItems[i].price,
restaurant,
);
},
child: Row(
children: [
Text(
'ADD',
style: TextStyle(
color: Colors.white,
),
),
Icon(
Icons.add,
color: Colors.white,
size: 17,
),
],
),
),
),
),
],
),
),
Padding(
padding: const EdgeInsets.only(
left: 10, top: 10, bottom: 11),
child: Text(
'₹${restaurantItems[i].price}',
style: TextStyle(
fontSize: 15,
color: kTextColor,
fontWeight: FontWeight.w500),
),
),
// Padding(
// padding: const EdgeInsets.only(
// left: 17, top: 17),
// child: InkWell(
// onTap: () {
// // Add to Cart
// },
// child: Row(
// children: [
// Padding(
// padding:
// const EdgeInsets.only(left: 15),
// child: Text(
// '${restaurantItems[i].quantity} Left',
// style: TextStyle(
// color: kTextLightColor,
// fontSize: 13,
// fontWeight: FontWeight.w700),
// ),
// )
// ],
// ),
// ),
// )
],
),
],
),
),
],
),
How can I fill the space between 'ITEM NAME' and 'ADD CONTAINER'? I have tried Spacer(), but it doesn't work. I have also seen the Expanded() widget, but since I am new to Flutter, I can't seem to get the hang of it. I have also added the Column widget, which is probably why the Spacer() widget is not working.
Any help would be appreciated, thanks.
| [
"Use SizedBox widget between 'ITEM NAME' and 'ADD CONTAINER'. for example:\nWidget build(BuildContext context) {\n double height = MediaQuery.of(context).size.height;\n double width = MediaQuery.of(context).size.width;\n return Scaffold(\n body: SafeArea(\n child: Padding(\n padding: const EdgeInsets.only(left: 10, top: 10),\n child: Row(\n mainAxisAlignment: MainAxisAlignment.spaceBetween,\n children: [\n Text(\n 'test',\n style: TextStyle(\n fontSize: 17,\n color: Colors.black,\n fontWeight: FontWeight.w700),\n ),\n // Spacer()\n SizedBox(width: width/2,)\n // 'ADD' BUTTON CONTAINER\n Container(\n decoration: BoxDecoration(\n borderRadius: BorderRadius.circular(8),\n color: Colors.black87,\n ),\n child: Padding(\n padding:\n const EdgeInsets.only(left: 9, top: 3, right: 5, bottom: 3),\n child: InkWell(\n splashColor: Colors.white,\n onTap: () {\n // print(restaurantItems[i].name);\n // cart.addItem(\n // restaurantItems[i].id,\n // restaurantItems[i].name,\n // restaurantItems[i].price,\n // restaurant,\n // );\n },\n child: Row(\n children: [\n Text(\n 'ADD',\n style: TextStyle(\n color: Colors.white,\n ),\n ),\n Icon(\n Icons.add,\n color: Colors.white,\n size: 17,\n ),\n ],\n ),\n ),\n ),\n ),\n ],\n ),\n ),));\n }\n\n",
"For providing space between widgets you can use SizedBox(width:10)\n",
"You need two additional widgets. The idea:\n body: Row(children: [\n Expanded( \n child: Column(children: [ //This is you topmost column. Wrap it with Expanded and then with Row.\n Row(\n mainAxisAlignment: MainAxisAlignment.spaceBetween,\n children: const [Text('ITEM NAME'), TextButton(onPressed: () {}, child: Text('ADD'))])\n ]))\n ])\n\n"
] | [
1,
0,
0
] | [
"Use Spacer() to fill the space if you don't want to define size of width\n",
"Try this:\nText(\n '${restaurantItems[i].name}',\n style: TextStyle(\n fontSize: 17,\n color: kTextColor,\n fontWeight: FontWeight.w700,\n ),\n),\nSpacer(flex: 1),\n// 'ADD' BUTTON CONTAINER\nContainer(\n decoration: BoxDecoration(\n borderRadius: BorderRadius.circular(8),\n color: Colors.black87,\n ),\n child: Padding(\n padding: const EdgeInsets.only(\n left: 9,\n top: 3,\n right: 5,\n bottom: 3,\n ),\n child: InkWell(\n splashColor: Colors.white,\n onTap: () {\n // print(restaurantItems[i].name);\n cart.addItem(\n restaurantItems[i].id,\n restaurantItems[i].name,\n restaurantItems[i].price,\n restaurant,\n );\n },\n child: Row(\n children: [\n Text(\n 'ADD',\n style: TextStyle(\n color: Colors.white,\n ),\n ),\n Icon(\n Icons.add,\n color: Colors.white,\n size: 17,\n ), // Icon\n ], // <Widget>[]\n ), // Row\n ), // InkWell\n ), // Padding\n), // Container\n\nSpacer(flex: 4),\n\n"
] | [
-1,
-1
] | [
"dart",
"flutter",
"flutter_layout"
] | stackoverflow_0066289868_dart_flutter_flutter_layout.txt |
Q:
static code analysis ida disassembler REM
I got this assignment to defuse a binary bomb, and I am not quite sure how to solve it or how to approach it.
Does anyone have experience with reverse-engineering malware who could help me analyse the binary code?
enter image description here
I put the bomb.exe in IDA, where it shows the binary code.
A:
I did not ask you to do my homework. I asked for help with the approach to solving the assignment; there is a difference :)
static code analysis ida disassembler REM | I got this assignment to defuse a binary bomb, and I am not quite sure how to solve it or how to approach it.
Does anyone have experience with reverse-engineering malware who could help me analyse the binary code?
enter image description here
I put the bomb.exe in IDA, where it shows the binary code.
| [
"I did not ask you to do my homework? I did asked for help to the approach to solve the assignment, there is a difference :)\n"
] | [
0
] | [] | [] | [
"ida",
"malware",
"reverse",
"static",
"static_code_analysis"
] | stackoverflow_0074583030_ida_malware_reverse_static_static_code_analysis.txt |
Q:
executing the operation written in a column pandas
I have a series of columns with numbers to put into
different formulas (in my example I use only sum and product).
The final column should give me the result of the formula (I get "None" instead).
In my example, if "2 + 1" is written, I would simply like to get 3 as the result of my operation.
Can you suggest the right solution, please?
import pandas as pd
operation = ["+", "*", "+", "*"]
op_number = ["Op1", "Op2", "Op3", "Op4"]
number_1 =[1,3,5,6]
number_2 =[2,4,2,3]
operation = pd.Series(operation)
op_number = pd.Series(op_number)
number_1 = pd.Series(number_1)
number_2 = pd.Series(number_2)
frame = { 'operation': operation, 'op_number': op_number,
'number_1' : number_1, 'number_2':number_2}
s1 = pd.DataFrame(frame)
s1['derived'] = s1['number_1'].astype(str) + " " + s1['operation'] + " " + s1['number_2'].astype(str)
s1['result'] = s1['derived'].apply(lambda x : exec(x) )
s1
A:
I would not recommend using pandas for this. However, if you want the solution in pandas, here it is:
You are using exec(), which does run the code, but it always returns None.
Hence, replace exec() with eval().
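For a quick illustration of the difference, outside pandas:
print(exec("2 + 1"))  # None -> exec() runs code for its side effects and returns nothing
print(eval("2 + 1"))  # 3    -> eval() evaluates an expression and returns its value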
Here's your updated code:
import pandas as pd
operation = ["+", "*", "+", "*"]
op_number = ["Op1", "Op2", "Op3", "Op4"]
number_1 =[1,3,5,6]
number_2 =[2,4,2,3]
operation = pd.Series(operation)
op_number = pd.Series(op_number)
number_1 = pd.Series(number_1)
number_2 = pd.Series(number_2)
frame = { 'operation': operation, 'op_number': op_number,
'number_1' : number_1, 'number_2':number_2}
s1 = pd.DataFrame(frame)
s1['derived'] = s1['number_1'].astype(str) + " " + s1['operation'] + " " + s1['number_2'].astype(str)
s1['result'] = s1['derived'].apply(lambda x : eval(x) )
print(s1)
Thanks to @MaxShawabkeh
executing the operation written in a column pandas | I have a series of columns with numbers to put into
different formulas (in my example I use only sum and product).
The final column should give me the result of the formula (I get "None" instead).
In my example, if "2 + 1" is written, I would simply like to get 3 as the result of my operation.
Can you suggest the right solution, please?
import pandas as pd
operation = ["+", "*", "+", "*"]
op_number = ["Op1", "Op2", "Op3", "Op4"]
number_1 =[1,3,5,6]
number_2 =[2,4,2,3]
operation = pd.Series(operation)
op_number = pd.Series(op_number)
number_1 = pd.Series(number_1)
number_2 = pd.Series(number_2)
frame = { 'operation': operation, 'op_number': op_number,
'number_1' : number_1, 'number_2':number_2}
s1 = pd.DataFrame(frame)
s1['derived'] = s1['number_1'].astype(str) + " " + s1['operation'] + " " + s1['number_2'].astype(str)
s1['result'] = s1['derived'].apply(lambda x : exec(x) )
s1
| [
"I would not recommend using pandas for this. However, if you want the solution in pandas then here is it:\nYou are doing exec() which works well but it always returns None.\nHence, replace exec() with eval().\nHere's your updated code:\nimport pandas as pd\noperation = [\"+\", \"*\", \"+\", \"*\"]\nop_number = [\"Op1\", \"Op2\", \"Op3\", \"Op4\"]\nnumber_1 =[1,3,5,6] \nnumber_2 =[2,4,2,3] \noperation = pd.Series(operation)\nop_number = pd.Series(op_number)\nnumber_1 = pd.Series(number_1)\nnumber_2 = pd.Series(number_2)\nframe = { 'operation': operation, 'op_number': op_number,\n 'number_1' : number_1, 'number_2':number_2} \ns1 = pd.DataFrame(frame)\ns1['derived'] = s1['number_1'].astype(str) + \" \" + s1['operation'] + \" \" + s1['number_2'].astype(str) \ns1['result'] = s1['derived'].apply(lambda x : eval(x) )\nprint(s1)\n\nThanks to @MaxShawabkeh\n"
] | [
0
] | [] | [] | [
"array_formulas",
"formula",
"pandas",
"python"
] | stackoverflow_0074675328_array_formulas_formula_pandas_python.txt |
Q:
What gcode keyword is responsible for "Click to resume" feature?
I want to make a G-code file to make it easier to level my Ender 3 Pro, which uses the Marlin firmware. In previous CHEP levelling G-code files, I've seen the "Click to resume" feature that waits for the user to click the knob before moving to the next spot. I cannot seem to find anything about this on the internet at all. Do any of you have an idea of how to accomplish this "Click to resume" feature in G-code?
I tried googling it, but I did not find anything related to this.
A:
You can use the M0 command in your G-code file to pause the printer and wait for the user to click the knob to continue. Here is an example:
M0
This will pause the printer until the user clicks the knob to resume the printing process.
If you want to display a message on the printer's screen while it is paused, you can put the message text directly after the M0 command (in Marlin, the "P" parameter is an expiry time in milliseconds, not the message), like this:
M0 Please click the knob to continue
This will pause the printer and display the specified message on the screen until the user clicks the knob to continue. You can read more about the M0 command here
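For context, here is a minimal sketch of how such pauses are typically combined with moves in a manual-levelling file; the coordinates below are placeholders, not taken from any particular CHEP file:
G28                 ; home all axes
G0 X30 Y30 Z0 F3000 ; move to the first corner
M0 Adjust the knob, then click to continue
G0 X200 Y30         ; move to the next corner
M0 Adjust the knob, then click to continue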
What gcode keyword is responsible for "Click to resume" feature? | I want to make a G-code file to make it easier to level my Ender 3 Pro, which uses the Marlin firmware. In previous CHEP levelling G-code files, I've seen the "Click to resume" feature that waits for the user to click the knob before moving to the next spot. I cannot seem to find anything about this on the internet at all. Do any of you have an idea of how to accomplish this "Click to resume" feature in G-code?
I tried googling it, but I did not find anything related to this.
| [
"You can use the M0 command in your G-code file to pause the printer and wait for the user to click the knob to continue. Here is an example:\nM0\n\nThis will pause the printer until the user clicks the knob to resume the printing process.\nIf you want to display a message on the printer's screen while it is paused, you can use the M0 command with the \"P\" parameter followed by the message you want to display, like this:\nM0 P\"Please click the knob to continue\"\n\nThis will pause the printer and display the specified message on the screen until the user clicks the knob to continue. You can read more about the M0 command here\n"
] | [
2
] | [] | [] | [
"3d_printing",
"g_code"
] | stackoverflow_0074675277_3d_printing_g_code.txt |
Q:
How to add an auto-incrementing column in a dataframe based on another column?
I have a PySpark dataframe similar to below:
order_id item qty
123 abc 1
123 abc1 4
234 abc2 5
234 abc3 2
234 abc4 7
123 abc5 5
456 abc6 9
456 abc7 8
456 abc8 9
I want to add an auto-incrementing column based on the column 'order_id' and the expected result is:
order_id item qty AutoIncrementingColumn_orderID
123 abc 1 1
123 abc1 4 2
234 abc2 5 1
234 abc3 2 2
234 abc4 7 3
123 abc5 5 3
456 abc6 9 1
456 abc7 8 2
456 abc8 9 3
I couldn't find a solution for generating it based on another column; any idea how to achieve this?
A:
You can use row_number:
from pyspark.sql import functions as F, Window
df2 = df.withColumn(
'AutoIncrementingColumn_orderID',
F.row_number().over(Window.partitionBy('order_id').orderBy('item'))
)
df2.show()
+--------+----+---+------------------------------+
|order_id|item|qty|AutoIncrementingColumn_orderID|
+--------+----+---+------------------------------+
| 234|abc2| 5| 1|
| 234|abc3| 2| 2|
| 234|abc4| 7| 3|
| 456|abc6| 9| 1|
| 456|abc7| 8| 2|
| 456|abc8| 9| 3|
| 123| abc| 1| 1|
| 123|abc1| 4| 2|
| 123|abc5| 5| 3|
+--------+----+---+------------------------------+
A:
There are a couple of ways of doing it.
Here is the SQL way:
df=Ss.sql("""
select order_id,item,qty,row_number() over(partition by order_id order by qty) as autoInc
from (
select order_id,item,qty
from ( values
(123,'abc',1 ),
(123,'abc1',4),
(234,'abc2',5),
(234,'abc3',2),
(234,'abc4',7),
(123,'abc5',5),
(456,'abc6',9),
(456,'abc7',8),
(456,'abc8',9)
) as T(order_id,item,qty))""")
df.show()
Output:
+--------+----+---+-------+
|order_id|item|qty|autoInc|
+--------+----+---+-------+
| 456|abc7| 8| 1|
| 456|abc6| 9| 2|
| 456|abc8| 9| 3|
| 234|abc3| 2| 1|
| 234|abc2| 5| 2|
| 234|abc4| 7| 3|
| 123| abc| 1| 1|
| 123|abc1| 4| 2|
| 123|abc5| 5| 3|
+--------+----+---+-------+
A:
def getidmap(idmap:dict):
    def generate_id(name:str, maxID:int):
        # return the existing ID if this name is already mapped
        for key, value in idmap.items():
            if name == value:
                return key
        # otherwise hand out a new ID above the current maximum
        return maxID + 1
    return F.udf(generate_id)
Table_Name = 'dbTable'
if spark._jsparkSession.catalog().tableExists('DB_Name',Table_Name):
maxID = spark.sql("select max(ID) from DB_Name.{}".format(Table_Name)).first()[0]
df_id_check = spark.sql("select * from DB_Name.{}".format(Table_Name))
name_id_map = df_id_check.select('ID', 'Name').rdd.collectAsMap()
dim_data = dim_data.withColumn("ID",getidmap(name_id_map)(F.col('Name'),lit(maxID)))
else:
dim_household_valid_data =dim_household_valid_data.withColumn('order', row_number().over(Window.partitionBy(lit('1')).orderBy(lit('1'))))\
.withColumn('ID', dense_rank().over(Window.partitionBy().orderBy('Name'))+0).orderBy('order')\
.drop('order')
| How to add an auto-incrementing column in a dataframe based on another column? | I have a PySpark dataframe similar to below:
order_id item qty
123 abc 1
123 abc1 4
234 abc2 5
234 abc3 2
234 abc4 7
123 abc5 5
456 abc6 9
456 abc7 8
456 abc8 9
I want to add an auto-incrementing column based on the column 'order_id' and the expected result is:
order_id item qty AutoIncrementingColumn_orderID
123 abc 1 1
123 abc1 4 2
234 abc2 5 1
234 abc3 2 2
234 abc4 7 3
123 abc5 5 3
456 abc6 9 1
456 abc7 8 2
456 abc8 9 3
I couldn't find a solution for generating it based on another column; any idea how to achieve this?
| [
"You can use row_number:\nfrom pyspark.sql import functions as F, Window\n\ndf2 = df.withColumn(\n 'AutoIncrementingColumn_orderID', \n F.row_number().over(Window.partitionBy('order_id').orderBy('item'))\n)\n\ndf2.show()\n+--------+----+---+------------------------------+\n|order_id|item|qty|AutoIncrementingColumn_orderID|\n+--------+----+---+------------------------------+\n| 234|abc2| 5| 1|\n| 234|abc3| 2| 2|\n| 234|abc4| 7| 3|\n| 456|abc6| 9| 1|\n| 456|abc7| 8| 2|\n| 456|abc8| 9| 3|\n| 123| abc| 1| 1|\n| 123|abc1| 4| 2|\n| 123|abc5| 5| 3|\n+--------+----+---+------------------------------+\n\n",
"Couple of ways of doing it:\nHere is the sql way :\ndf=Ss.sql(\"\"\"\nselect order_id,item,qty,row_number() over(partition by order_id order by qty) as autoInc\nfrom (\nselect order_id,item,qty\nfrom ( values \n(123,'abc',1 ),\n(123,'abc1',4),\n(234,'abc2',5),\n(234,'abc3',2),\n(234,'abc4',7),\n(123,'abc5',5),\n(456,'abc6',9),\n(456,'abc7',8),\n(456,'abc8',9)\n) as T(order_id,item,qty))\"\"\")\n\ndf.show()\n\nOutput:\n+--------+----+---+-------+\n|order_id|item|qty|autoInc|\n+--------+----+---+-------+\n| 456|abc7| 8| 1|\n| 456|abc6| 9| 2|\n| 456|abc8| 9| 3|\n| 234|abc3| 2| 1|\n| 234|abc2| 5| 2|\n| 234|abc4| 7| 3|\n| 123| abc| 1| 1|\n| 123|abc1| 4| 2|\n| 123|abc5| 5| 3|\n+--------+----+---+-------+\n\n",
"def getidmap(idmap:dict):\n def generate_id(name:str,maxID:int):\n for key, value in name_id_map.items():\n if name in name_id_map.values():\n if name == value:\n return key\n else:\n max_ID = max_ID+1\n return max_ID\n return F.udf(generate_household_id)\n\nTable_Name = 'dbTable'\n\nif spark._jsparkSession.catalog().tableExists('DB_Name',Table_Name):\n maxID = spark.sql(\"select max(ID) from DB_Name.{}\".format(Table_Name)).first()[0]\n df_id_check = spark.sql(\"select * from DB_Name.{}\".format(Table_Name))\n name_id_map = df_id_check.select('ID', 'Name').rdd.collectAsMap()\n dim_data = dim_data.withColumn(\"ID\",getidmap(name_id_map)(F.col('Name'),lit(maxID)))\n \nelse:\n dim_household_valid_data =dim_household_valid_data.withColumn('order', row_number().over(Window.partitionBy(lit('1')).orderBy(lit('1'))))\\\n .withColumn('ID', dense_rank().over(Window.partitionBy().orderBy('Name'))+0).orderBy('order')\\\n .drop('order')\n\n"
] | [
1,
1,
0
] | [] | [] | [
"apache_spark",
"apache_spark_sql",
"auto_increment",
"pyspark"
] | stackoverflow_0065735916_apache_spark_apache_spark_sql_auto_increment_pyspark.txt |
Q:
TypeScript filter out nulls from an array
TypeScript, --strictNullChecks mode.
Suppose I have an array of nullable strings (string | null)[]. What would be a single-expression way to remove all nulls in a such a way that the result has type string[]?
const array: (string | null)[] = ["foo", "bar", null, "zoo", null];
const filterdArray: string[] = ???;
Array.filter does not work here:
// Type '(string | null)[]' is not assignable to type 'string[]'
array.filter(x => x != null);
Array comprehensions could've worked, but they are not supported by TypeScript.
Actually, the question can be generalized to the problem of filtering an array of any union type by removing entries having one particular type from the union. But let's focus on unions with null and perhaps undefined, as these are the most common use cases.
A:
You can use a type predicate function in the .filter to avoid opting out of strict type checking:
function notEmpty<TValue>(value: TValue | null | undefined): value is TValue {
return value !== null && value !== undefined;
}
const array: (string | null)[] = ['foo', 'bar', null, 'zoo', null];
const filteredArray: string[] = array.filter(notEmpty);
Alternatively you can use array.reduce<string[]>(...).
2021 update: stricter predicates
While this solution works in most scenarios, you can get a more rigorous type check in the predicate. As presented, the function notEmpty does not actually guarantee that it identifies correctly whether the value is null or undefined at compile time. For example, try shortening its return statement down to return value !== null;, and you'll see no compiler error, even though the function will incorrectly return true on undefined.
One way to mitigate this is to constrain the type first using control flow blocks, and then to use a dummy variable to give the compiler something to check. In the example below, the compiler is able to infer that the value parameter cannot be a null or undefined by the time it gets to the assignment. However, if you remove || value === undefined from the if condition, you will see a compiler error, informing you of the bug in the example above.
function notEmpty<TValue>(value: TValue | null | undefined): value is TValue {
if (value === null || value === undefined) return false;
const testDummy: TValue = value;
return true;
}
A word of caution: there exist situations where this method can still fail you. Be sure to be mindful of issues associated with contravariance.
A:
Similar to @bijou-trouvaille's answer, you just need to declare the <arg> is <Type> as the output of the filter function:
array.filter((x): x is MyType => x !== null);
A:
One more for good measure as people often forget about flatMap which can handle filter and map in one go (this also doesn't require any casting to string[]):
// (string | null)[]
const arr = ["a", null, "b", "c"];
// string[]
const stringsOnly = arr.flatMap(f => f ? [f] : []);
A:
One liner:
const filteredArray: string[] = array.filter((s): s is string => Boolean(s));
TypeScript playground
The trick is to pass a type predicate (:s is string syntax).
This answer shows that Array.filter requires users to provide a type predicate.
A:
You can cast your filter result into the type you want:
const array: (string | null)[] = ["foo", "bar", null, "zoo", null];
const filterdArray = array.filter(x => x != null) as string[];
This works for the more general use case that you mentioned, for example:
const array2: (string | number)[] = ["str1", 1, "str2", 2];
const onlyStrings = array2.filter(x => typeof x === "string") as string[];
const onlyNumbers = array2.filter(x => typeof x === "number") as number[];
(code in playground)
A:
To avoid everybody having to write the same type guard helper functions over and over again I bundled functions called isPresent, isDefined and isFilled into a helper library: https://www.npmjs.com/package/ts-is-present
The type definitions are currently:
export declare function isPresent<T>(t: T | undefined | null): t is T;
export declare function isDefined<T>(t: T | undefined): t is T;
export declare function isFilled<T>(t: T | null): t is T;
You can use this like so:
import { isDefined } from 'ts-is-present';
type TestData = {
data: string;
};
const results: Array<TestData | undefined> = [
{ data: 'hello' },
undefined,
{ data: 'world' }
];
const definedResults: Array<TestData> = results.filter(isDefined);
console.log(definedResults);
When Typescript bundles this functionality in I'll remove the package. But, for now, enjoy.
A:
Here is a solution that uses NonNullable. I find it even a little bit more concise than the accepted answer by @bijou-trouvaille
function notEmpty<TValue>(value: TValue): value is NonNullable<TValue> {
return value !== null && value !== undefined;
}
const array: (string | null | undefined)[] = ['foo', 'bar', null, 'zoo', undefined];
const filteredArray: string[] = array.filter(notEmpty);
console.log(filteredArray)
[LOG]: ["foo", "bar", "zoo"]
A:
Just realized that you can do this:
const nonNull = array.filter((e): e is Exclude<typeof e, null> => e !== null)
So that you:
get a one-liner, no additional functions
do not have to know the type of array elements, so you can copy this everywhere!
A:
If you already use Lodash, you can use compact.
Or, if you prefer Ramda, the ramda-adjunct has also compact function.
Both have types, so your tsc will be happy and get the correct types as a result.
From Lodash d.ts file:
/**
* Creates an array with all falsey values removed. The values false, null, 0, "", undefined, and NaN are
* falsey.
*
* @param array The array to compact.
* @return Returns the new array of filtered values.
*/
compact<T>(array: List<T | null | undefined | false | "" | 0> | null | undefined): T[];
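For illustration, a small usage sketch, assuming lodash and its typings are installed; the element type is inferred, so the nullables drop out:
import { compact } from 'lodash';

const array: (string | null)[] = ['foo', 'bar', null, 'zoo', null];
const filteredArray: string[] = compact(array); // ['foo', 'bar', 'zoo']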
A:
I believe you have it all right, except that the type checking just doesn't narrow the filtered type, so it still differs from the declared return type.
const array: (string | null)[] = ["foo", "bar", null, "zoo", null];
const filterdArray: string[] = array.filter(f => f !== undefined && f !== null) as any;
console.log(filterdArray);
A:
I think this will be an easy approach, with more cleaner code
const array: (string | null)[] = ['foo', 'bar', null, 'zoo', null];
const filteredArray: string[] = array.filter(a => !!a);
A:
simply use
array.filter(Boolean);
This will keep only the truthy values.
This, unfortunately, does not provide type inference; I found the following solution
here
type Truthy<T> = T extends false | '' | 0 | null | undefined ? never : T; //from lodash
function truthy<T>(value: T): value is Truthy<T> {
return Boolean(value); // or !!value
}
const arr =["hello","felow","developer","",null,undefined];
const truthyArr = arr.filter(truthy);
// the type of truthyArr will be string[]
A:
const filterdArray = array.filter(f => !!f) as string[];
A:
If you are checking for null along with other conditions in filter, this can simply be used. Hope this helps someone who is looking for a solution for an array of objects.
array.filter(x => x != null);
array.filter(x => (x != null) && (x.name == 'Tom'));
A:
TypeScript has some utilities to infer the type of the array and exclude the null values from it:
const arrayWithNulls = ["foo", "bar", null, "zoo", null]
type ArrayWithoutNulls = NonNullable<typeof arrayWithNulls[number]>[]
const arrayWithoutNulls = arrayWithNulls.filter(x => x != null) as ArrayWithoutNulls
Longer but safer than just manually casting as string[] on your new array.
Step by step:
Get the types from the original array:
typeof arrayWithNulls[number] // => string | null
Exclude the null values:
NonNullable<typeof arrayWithNulls[number]> // => string
Make it an array:
NonNullable<typeof arrayWithNulls[number]>[] // => string[]
Links:
NonNullable (official doc)
typeof array[number] (blog post, I couldn't find anything about it on the official doc)
A:
Using reduce
Some answers suggest reduce, here is how:
const languages = ["fr", "en", undefined, null, "", "de"]
// the one I prefer:
languages.reduce<string[]>((previous, current) => current ? [...previous, current] : previous, [])
// or
languages.reduce((previous, current) => current ? [...previous, current] : previous, Array<string>())
// or
const reducer = (previous: string[], current: string | undefined | null) => current ? [...previous, current] : previous
languages.reduce(reducer, [])
Result: ["fr", "en", "de"]
TS Playground here.
A:
I've come back to this question many times hoping some new Typescript feature or typing may fix it.
Here's a simple trick I quite like for when combining map with a subsequent filter.
const animals = ['cat', 'dog', 'mouse', 'sheep'];
const notDogAnimals = animals.map(a =>
{
if (a == 'dog')
{
return null!; // just skip dog
}
else {
return { animal: a };
}
}).filter(a => a);
You'll see I'm returning null! which actually becomes type never - meaning that the final type doesn't have null.
This is a slight variation on the original question but I find myself in this scenario quite often and it helps avoid another method call. Hopefully someday Typescript will come up with a better way.
A:
The shortest way:
const validData = array.filter(Boolean)
A:
Or you can try the package @p4ck493/ts-is:
https://www.npmjs.com/package/@p4ck493/ts-is
The example uses the CDN method, but the package also supports TypeScript.
<script>var exports = {};</script>
<script src="//unpkg.com/@p4ck493/[email protected]/dist/index.js"></script>
<script>
const {is} = exports;
console.log('is.string: ', is.string('')); // true
console.log('is.string.empty: ', is.string.empty('')); // true
console.log('is.string.not.empty: ', is.string.not.empty('')); // false
const array = ["foo", "bar", null, "zoo", null];
const filterdArray = array.filter(is.string.not.empty);
console.log('array:', array);
console.log('filterdArray:', filterdArray);
</script>
| TypeScript filter out nulls from an array | TypeScript, --strictNullChecks mode.
Suppose I have an array of nullable strings (string | null)[]. What would be a single-expression way to remove all nulls in a such a way that the result has type string[]?
const array: (string | null)[] = ["foo", "bar", null, "zoo", null];
const filterdArray: string[] = ???;
Array.filter does not work here:
// Type '(string | null)[]' is not assignable to type 'string[]'
array.filter(x => x != null);
Array comprehensions could've worked, but they are not supported by TypeScript.
Actually, the question can be generalized to the problem of filtering an array of any union type by removing entries having one particular type from the union. But let's focus on unions with null and perhaps undefined, as these are the most common use cases.
| [
"You can use a type predicate function in the .filter to avoid opting out of strict type checking:\nfunction notEmpty<TValue>(value: TValue | null | undefined): value is TValue {\n return value !== null && value !== undefined;\n}\n\nconst array: (string | null)[] = ['foo', 'bar', null, 'zoo', null];\nconst filteredArray: string[] = array.filter(notEmpty);\n\nAlternatively you can use array.reduce<string[]>(...).\n2021 update: stricter predicates\nWhile this solution works in most scenarios, you can get a more rigorous type check in the predicate. As presented, the function notEmpty does not actually guarantee that it identifies correctly whether the value is null or undefined at compile time. For example, try shortening its return statement down to return value !== null;, and you'll see no compiler error, even though the function will incorrectly return true on undefined.\nOne way to mitigate this is to constrain the type first using control flow blocks, and then to use a dummy variable to give the compiler something to check. In the example below, the compiler is able to infer that the value parameter cannot be a null or undefined by the time it gets to the assignment. However, if you remove || value === undefined from the if condition, you will see a compiler error, informing you of the bug in the example above.\nfunction notEmpty<TValue>(value: TValue | null | undefined): value is TValue {\n if (value === null || value === undefined) return false;\n const testDummy: TValue = value;\n return true;\n}\n\nA word of caution: there exist situations where this method can still fail you. Be sure to be mindful of issues associated with contravariance.\n",
"Similar to @bijou-trouvaille's answer, you just need to declare the <arg> is <Type> as the output of the filter function:\narray.filter((x): x is MyType => x !== null);\n\n",
"One more for good measure as people often forget about flatMap which can handle filter and map in one go (this also doesn't require any casting to string[]):\n// (string | null)[]\nconst arr = [\"a\", null, \"b\", \"c\"];\n// string[]\nconst stringsOnly = arr.flatMap(f => f ? [f] : []);\n\n",
"One liner:\nconst filteredArray: string[] = array.filter((s): s is string => Boolean(s));\n\nTypeScript playground\nThe trick is to pass a type predicate (:s is string syntax).\nThis answer shows that Array.filter requires users to provide a type predicate.\n",
"You can cast your filter result into the type you want:\nconst array: (string | null)[] = [\"foo\", \"bar\", null, \"zoo\", null];\nconst filterdArray = array.filter(x => x != null) as string[];\n\nThis works for the more general use case that you mentioned, for example:\nconst array2: (string | number)[] = [\"str1\", 1, \"str2\", 2];\nconst onlyStrings = array2.filter(x => typeof x === \"string\") as string[];\nconst onlyNumbers = array2.filter(x => typeof x === \"number\") as number[];\n\n(code in playground)\n",
"To avoid everybody having to write the same type guard helper functions over and over again I bundled functions called isPresent, isDefined and isFilled into a helper library: https://www.npmjs.com/package/ts-is-present\nThe type definitions are currently:\nexport declare function isPresent<T>(t: T | undefined | null): t is T;\nexport declare function isDefined<T>(t: T | undefined): t is T;\nexport declare function isFilled<T>(t: T | null): t is T;\n\nYou can use this like so:\nimport { isDefined } from 'ts-is-present';\n\ntype TestData = {\n data: string;\n};\n\nconst results: Array<TestData | undefined> = [\n { data: 'hello' },\n undefined,\n { data: 'world' }\n];\n\nconst definedResults: Array<TestData> = results.filter(isDefined);\n\nconsole.log(definedResults);\n\nWhen Typescript bundles this functionality in I'll remove the package. But, for now, enjoy.\n",
"Here is a solution that uses NonNullable. I find it even a little bit more concise than the accepted answer by @bijou-trouvaille\nfunction notEmpty<TValue>(value: TValue): value is NonNullable<TValue> {\n return value !== null && value !== undefined;\n}\n\nconst array: (string | null | undefined)[] = ['foo', 'bar', null, 'zoo', undefined];\n\nconst filteredArray: string[] = array.filter(notEmpty);\nconsole.log(filteredArray)\n[LOG]: [\"foo\", \"bar\", \"zoo\"]\n\n",
"Just realized that you can do this:\nconst nonNull = array.filter((e): e is Exclude<typeof e, null> => e !== null)\n\nSo that you:\n\nget a one-liner, no additional functions\ndo not have to know the type of array elements, so you can copy this everywhere!\n\n",
"If you already use Lodash, you can use compact.\nOr, if you prefer Ramda, the ramda-adjunct has also compact function.\nBoth have types, so your tsc will be happy and get the correct types as a result.\nFrom Lodash d.ts file:\n/**\n * Creates an array with all falsey values removed. The values false, null, 0, \"\", undefined, and NaN are\n * falsey.\n *\n * @param array The array to compact.\n * @return Returns the new array of filtered values.\n */\ncompact<T>(array: List<T | null | undefined | false | \"\" | 0> | null | undefined): T[];\n\n",
"I believe you have it all good except that the type checking just makes the filtered type not be different than the return type.\nconst array: (string | null)[] = [\"foo\", \"bar\", null, \"zoo\", null];\nconst filterdArray: string[] = array.filter(f => f !== undefined && f !== null) as any;\nconsole.log(filterdArray);\n\n",
"I think this will be an easy approach, with more cleaner code\nconst array: (string | null)[] = ['foo', 'bar', null, 'zoo', null];\nconst filteredArray: string[] = array.filter(a => !!a);\n\n",
"simply use\narray.filter(Boolean);\n\n\nThis will work for all truth values.\nThis, unfortunately, do not provide type inference, found this solution on\nhere\n\ntype Truthy<T> = T extends false | '' | 0 | null | undefined ? never : T; //from lodash \n\nfunction truthy<T>(value: T): value is Truthy<T> {\n return Boolean(value); // or !!value\n}\n\nconst arr =[\"hello\",\"felow\",\"developer\",\"\",null,undefined];\n\nconst truthyArr = arr.filter(truthy);\n\n// the type of truthyArr will be string[]\n\n\n",
"const filterdArray = array.filter(f => !!f) as string[];\n\n",
"If you are checking null with other conditions using filter simply this can be used hope this helps for some one who is looking solutions for an object array\narray.filter(x => x != null);\narray.filter(x => (x != null) && (x.name == 'Tom'));\n\n",
"TypeScript has some utilities to infer the type of the array and exclude the null values from it:\nconst arrayWithNulls = [\"foo\", \"bar\", null, \"zoo\", null]\n\ntype ArrayWithoutNulls = NonNullable<typeof arrayWithNulls[number]>[]\n\nconst arrayWithoutNulls = arrayWithNulls.filter(x => x != null) as ArrayWithoutNulls\n\nLonger but safer than just manually casting as string[] on your new array.\nStep by step:\n\nGet the types from the original array:\n\ntypeof arrayWithNulls[number] // => string | null\n\n\nExclude the null values:\n\nNonNullable<typeof arrayWithNulls[number]> // => string\n\n\nMake it an array:\n\nNonNullable<typeof arrayWithNulls[number]>[] // => string[]\n\nLinks:\n\nNonNullable (official doc)\ntypeof array[number] (blog post, I couldn't find anything about it on the official doc)\n\n",
"Using reduce\nSome answers suggest reduce, here is how:\nconst languages = [\"fr\", \"en\", undefined, null, \"\", \"de\"]\n\n// the one I prefer:\nlanguages.reduce<string[]>((previous, current) => current ? [...previous, current] : previous, [])\n\n// or\nlanguages.reduce((previous, current) => current ? [...previous, current] : previous, Array<string>())\n\n// or\nconst reducer = (previous: string[], current: string | undefined | null) => current ? [...previous, current] : previous\nlanguages.reduce(reducer, [])\n\nResult: [\"fr\", \"en\", \"de\"]\nTS Playground here.\n",
"I've come back to this question many times hoping some new Typescript feature or typing may fix it.\nHere's a simple trick I quite like for when combining map with a subsequent filter.\nconst animals = ['cat', 'dog', 'mouse', 'sheep'];\n\nconst notDogAnimals = animals.map(a => \n{\n if (a == 'dog')\n {\n return null!; // just skip dog\n }\n else {\n return { animal: a };\n }\n}).filter(a => a);\n\nYou'll see I'm returning null! which actually becomes type never - meaning that the final type doesn't have null.\nThis is a slight variation on the original question but I find myself in this scenario quite often and it helps avoid another method call. Hopefully someday Typescript will come up with a better way.\n",
"The shortest way:\nconst validData = array.filter(Boolean)\n\n",
"Or you can try the package: @p4ck93/ts-is\nhttps://www.npmjs.com/package/@p4ck493/ts-is\nThe example uses the CDN method, but the package also supports typescript.\n\n\n<script>var exports = {};</script>\n<script src=\"//unpkg.com/@p4ck493/[email protected]/dist/index.js\"></script>\n<script>\n const {is} = exports;\n console.log('is.string: ', is.string('')); // true\n console.log('is.string.empty: ', is.string.empty('')); // true\n console.log('is.string.not.empty: ', is.string.not.empty('')); // false\n \n \n const array = [\"foo\", \"bar\", null, \"zoo\", null];\n const filterdArray = array.filter(is.string.not.empty);\n \n console.log('array:', array);\n console.log('filterdArray:', filterdArray);\n</script>\n\n\n\n"
] | [
336,
208,
162,
26,
14,
10,
10,
7,
4,
2,
2,
2,
1,
0,
0,
0,
0,
0,
0
] | [] | [] | [
"null",
"typescript"
] | stackoverflow_0043118692_null_typescript.txt |
Q:
Having bug with destroy and update method in Laravel
I'm trying to create a simple Laravel application to use a resource controller, but I'm running into some kind of bug in the update and destroy methods when I pass the object itself to the method.
My update and destroy functions are simple; they just take $todoList and either update or delete that object. However, they don't seem to work as expected. When I dd($todoList) it returns the correct value, but the database is not updated.
public function update(TodoList $todoList)
{
$todoList->update(['completed' => 1]);
return redirect()->route('todos.index');
}
public function destroy(TodoList $todoList)
{
$todoList->delete();
return redirect()->route('todos.index');
}
My blade template looks like this:
<form action="{{route('todos.destroy', $todo)}}" method="POST">
@csrf
@method('DELETE')
<button type="submit" class="btn btn-link btn-sm float-end"><i class="fas fa-trash"></i></button>
</form>
<form action="{{route('todos.update', $todo)}}" method="POST">
<span @class([$todo->completed ? 'bg-success':''])>{{$todo->content}}</span>
@csrf
@method('PATCH')
<button type="submit" class="btn btn-link btn-sm float-end"><i class="fas fa-edit"></i></button>
</form>
I have also tried to pass $todo->id to the route, but none of it seems to be working. I have checked the documentation (and I usually work with documentation), but this seems like an interesting bug to me.
My route is
Route::resource('/todos', TodoListController::class);
and finally my Model is like following
class TodoList extends Model
{
use HasFactory;
protected $fillable = ['content'];
protected $guarded = [];
}
Having bug with destroy and update method in Laravel | I'm trying to create a simple Laravel application to use a resource controller, but I'm running into some kind of bug in the update and destroy methods when I pass the object itself to the method.
My update and destroy functions are simple; they just take $todoList and either update or delete that object. However, they don't seem to work as expected. When I dd($todoList) it returns the correct value, but the database is not updated.
public function update(TodoList $todoList)
{
$todoList->update(['completed' => 1]);
return redirect()->route('todos.index');
}
public function destroy(TodoList $todoList)
{
$todoList->delete();
return redirect()->route('todos.index');
}
My blade template looks like this:
<form action="{{route('todos.destroy', $todo)}}" method="POST">
@csrf
@method('DELETE')
<button type="submit" class="btn btn-link btn-sm float-end"><i class="fas fa-trash"></i></button>
</form>
<form action="{{route('todos.update', $todo)}}" method="POST">
<span @class([$todo->completed ? 'bg-success':''])>{{$todo->content}}</span>
@csrf
@method('PATCH')
<button type="submit" class="btn btn-link btn-sm float-end"><i class="fas fa-edit"></i></button>
</form>
I have also tried to pass $todo->id to the route, but none of it seems to be working. I have checked the documentation (and I usually work with documentation), but this seems like an interesting bug to me.
My route is
Route::resource('/todos', TodoListController::class);
and finally my Model is like following
class TodoList extends Model
{
use HasFactory;
protected $fillable = ['content'];
protected $guarded = [];
}
| [] | [] | [
"I think the problem is the way you define the route. You should remove the / in the Route::resource('todos', TodoListController::class);\n"
] | [
-1
] | [
"laravel",
"php"
] | stackoverflow_0074675255_laravel_php.txt |
Q:
How to get rid of "undefined reference to 'Class::member'"? in conjunction with initialization of an array
When building my code I get the following "undefined reference"-errors, which I cannot get rid of. I've already tried several hints from stack overflow but nothing helps :-(. Maybe you have an idea?
I use VSCode with PlatformIO for an Arduino Uno on Mac OS.
in function `get7SegBitMap':
/Users/christian/Projekt/src/charmap7seg.cpp:70: undefined reference to 'Led7SegmentCharMap::bitMap'
/Users/christian/Projekt/src/charmap7seg.cpp:70: undefined reference to `Led7SegmentCharMap::bitMap'
collect2: error: ld returned 1 exit status
The hierarchy is:
main.cpp includes ledmatrix.hpp
ledmatrix.cpp includes ledmatrix.hpp
ledmatrix.hpp includes charmap7seg.hpp
charmap7seg.cpp includes charmap7seg.hpp
charmap7seg.hpp
#pragma once
#include <Arduino.h>
class Led7SegmentCharMap {
private:
static const uint8_t bitMap[]; // will be initialized in cpp-file
uint8_t getCharMapIndex(const unsigned char outChar);
public:
// Konstruktur
Led7SegmentCharMap();
// BitMap zur Darstellung auf der 7-Segment-Anzeige für outChar ermitteln
uint8_t get7SegBitMap(const unsigned char outChar);
};
int set7SegValue(const LedMatrixPos pos, const uint8_t charBitMap);
charmap7seg.cpp
#include <Arduino.h>
#include <charmap7seg.hpp>
// Konstruktur
Led7SegmentCharMap::Led7SegmentCharMap() {
uint8_t bitMap[] = { ///< charMap contains bitmaps for 7-seg-displays
//gfedcba
0b0111111, ///< "0": Segments f, e, d, c, b, a --> bitMap[0]
0b0000110, ///< "1": Segments c, b --> bitMap[1]
0b1011011, ///< "2": Segments g, e, d, b, a --> bitMap[2]
(...)
}
(void)bitMap; // to suppress the compiler warning "unused variable"
};
uint8_t Led7SegmentCharMap::get7SegBitMap(const unsigned char outChar) {
return bitMap[getCharMapIndex(outChar)]; // <===== this is line 70
};
(...)
ledmatrix.hpp
#pragma once
#include <Arduino.h>
#include <charmap7seg.hpp>
class LedMatrix {
private:
Led7SegmentCharMap charMap;
(...)
public:
Led7SegmentCharMap(); // Konstruktor
uint8_t get7SegBitMap(const unsigned char outChar);
void LedMatrix::display(const String outString);
(...)
ledmatrix.cpp
#include <ledmatrix.hpp>
(...)
void LedMatrix::display(const String outString) {
(...) // get a char out of outString --> outChar
uint8_t charBitMap = charMap.get7SegBitMap(outChar); // get 7-seg-"bitmap"
(...)
};
(...)
My expectation was that all dependencies are fulfilled (which is apparently not true, judging by the error messages). I had some trouble initializing the bitMap array. Maybe the undefined reference error is related to that?
A:
Today I found a hint here: here.
With this I've got a compilable solution. In the cpp file it is necessary to move the bitMap declaration out of the constructor and do the initialization there. See new cpp file below.
The only issue left is that bitMap is now public instead of private.
So the initial question
How to get rid of "undefined reference to 'Class::member'"? in conjunction with initialization of an array
was wrong and should be:
How to declare an c-array in header file and define it in the cpp-file without specifying the size/dimension?
charmap7seg.hpp
class Led7SegmentCharMap {
public:
static const uint8_t bitMap[];
// BitMap zur Darstellung auf der 7-Segment-Anzeige für outChar ermitteln
uint8_t get7SegBitMap(const unsigned char outChar);
private:
uint8_t getCharMapIndex(const unsigned char outChar);
};
charmap7seg.cpp
const uint8_t Led7SegmentCharMap::bitMap[] = { ///< charMap contains bitmaps for 7-seg-displays
///< bzw. den Buchstaben darstellen.
0b0111111, ///< "0": Segmente f, e, d, c, b, a --> bitMap[0]
0b0000110, ///< "1": Segmente c, b --> bitMap[1]
0b1011011, ///< "2": Segmente g, e, d, b, a --> bitMap[2]
};
// BitMap zur Darstellung auf der 7-Segment-Anzeige aus Zeichen bzw. Index ermitteln
uint8_t Led7SegmentCharMap::get7SegBitMap(const unsigned char outChar) {
return bitMap[getCharMapIndex(outChar)];
};
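As a side note (a small sketch, not part of the original project): the out-of-class definition also works when the member stays private, because the access specifier only controls who may refer to the member, not where its definition may appear.
// charmap7seg.hpp
class Led7SegmentCharMap {
private:
    static const uint8_t bitMap[];   // can stay private
public:
    uint8_t get7SegBitMap(const unsigned char outChar);
};

// charmap7seg.cpp: defining the private static member here is allowed
const uint8_t Led7SegmentCharMap::bitMap[] = {0b0111111, 0b0000110, 0b1011011};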
| How to get rid of "undefined reference to 'Class::member'"? in conjunction with initialization of an array | When building my code I get the following "undefined reference"-errors, which I cannot get rid of. I've already tried several hints from stack overflow but nothing helps :-(. Maybe you have an idea?
I use VSCode with PlatformIO for an Arduino Uno on Mac OS.
in function `get7SegBitMap':
/Users/christian/Projekt/src/charmap7seg.cpp:70: undefined reference to 'Led7SegmentCharMap::bitMap'
/Users/christian/Projekt/src/charmap7seg.cpp:70: undefined reference to `Led7SegmentCharMap::bitMap'
collect2: error: ld returned 1 exit status
The hierarchy is:
main.cpp includes ledmatrix.hpp
ledmatrix.cpp includes ledmatrix.hpp
ledmatrix.hpp includes charmap7seg.hpp
charmap7seg.cpp includes charmap7seg.hpp
charmap7seg.hpp
#pragma once
#include <Arduino.h>
class Led7SegmentCharMap {
private:
static const uint8_t bitMap[]; // will be initialized in cpp-file
uint8_t getCharMapIndex(const unsigned char outChar);
public:
// Konstruktur
Led7SegmentCharMap();
// BitMap zur Darstellung auf der 7-Segment-Anzeige für outChar ermitteln
uint8_t get7SegBitMap(const unsigned char outChar);
};
int set7SegValue(const LedMatrixPos pos, const uint8_t charBitMap);
charmap7seg.cpp
#include <Arduino.h>
#include <charmap7seg.hpp>
// Konstruktur
Led7SegmentCharMap::Led7SegmentCharMap() {
uint8_t bitMap[] = { ///< charMap contains bitmaps for 7-seg-displays
//gfedcba
0b0111111, ///< "0": Segments f, e, d, c, b, a --> bitMap[0]
0b0000110, ///< "1": Segments c, b --> bitMap[1]
0b1011011, ///< "2": Segments g, e, d, b, a --> bitMap[2]
(...)
}
(void)bitMap; // to suppress the compiler warning "unused variable"
};
uint8_t Led7SegmentCharMap::get7SegBitMap(const unsigned char outChar) {
return bitMap[getCharMapIndex(outChar)]; // <===== this is line 70
};
(...)
ledmatrix.hpp
#pragma once
#include <Arduino.h>
#include <charmap7seg.hpp>
class LedMatrix {
private:
Led7SegmentCharMap charMap;
(...)
public:
Led7SegmentCharMap(); // Konstruktor
uint8_t get7SegBitMap(const unsigned char outChar);
void LedMatrix::display(const String outString);
(...)
ledmatrix.cpp
#include <ledmatrix.hpp>
(...)
void LedMatrix::display(const String outString) {
(...) // get a char out of outString --> outChar
uint8_t charBitMap = charMap.get7SegBitMap(outChar); // get 7-seg-"bitmap"
(...)
};
(...)
My expectation was that all dependencies are fulfilled (which is apparently not true, judging by the error messages). I had some trouble initializing the bitMap array. Maybe the undefined reference error is related to that?
| [
"Today I found a hint here: here.\nWith this I've got a compilable solution. In the cpp file it is necessary to move the bitMap declaration out of the constructor and do the initialization there. See new cpp file below.\nThe only issue left is, that bitMapis now public instead of private.\nSo the initial question\n\nHow to get rid of \"undefined reference to 'Class::member'\"? in conjunction with initialization of an array\n\nwas wrong and should be:\n\nHow to declare an c-array in header file and define it in the cpp-file without specifying the size/dimension?\n\ncharmap7seg.hpp\nclass Led7SegmentCharMap {\npublic:\n static const uint8_t bitMap[];\n\n // BitMap zur Darstellung auf der 7-Segment-Anzeige für outChar ermitteln\n uint8_t get7SegBitMap(const unsigned char outChar);\n\nprivate:\n uint8_t getCharMapIndex(const unsigned char outChar);\n};\n\ncharmap7seg.cpp\nconst uint8_t Led7SegmentCharMap::bitMap[] = { ///< charMap contains bitmaps for 7-seg-displays\n ///< bzw. den Buchstaben darstellen.\n 0b0111111, ///< \"0\": Segmente f, e, d, c, b, a --> bitMap[0]\n 0b0000110, ///< \"1\": Segmente c, b --> bitMap[1]\n 0b1011011, ///< \"2\": Segmente g, e, d, b, a --> bitMap[2]\n};\n\n// BitMap zur Darstellung auf der 7-Segment-Anzeige aus Zeichen bzw. Index ermitteln\nuint8_t Led7SegmentCharMap::get7SegBitMap(const unsigned char outChar) {\n return bitMap[getCharMapIndex(outChar)];\n};\n\n"
] | [
0
] | [
"erase the uint8_t. you are redeclaring the bitmap and it's going out of scope when the constructor ends. instead do:\nthis->bitMap[] = { ///< charMap contains bitmaps for 7-seg-displays\n //gfedcba\n 0b0111111, ///< \"0\": Segments f, e, d, c, b, a --> bitMap[0]\n 0b0000110, ///< \"1\": Segments c, b --> bitMap[1]\n 0b1011011, ///< \"2\": Segments g, e, d, b, a --> bitMap[2]\n (...)\n }\n\n"
] | [
-1
] | [
"arduino_c++",
"c++11",
"platformio"
] | stackoverflow_0074671674_arduino_c++_c++11_platformio.txt |
Q:
ERROR: JAVA_HOME is not set and no 'java' command could be found in your flutter PATH. In Flutter
I installed Android Studio 4.1 and tried to run the existing project. But it gives an error like this:
ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation.
A:
You have to set the JAVA_HOME Environment Variable.
On Windows, I solved the issue as follows:
Download the Java JDK from here and install it. (This links to version 15, which requires you to create an account in order to download. Version 16 is available to download without creating an account, but it has caused me an error that required me to downgrade.)
Set the "JAVA_HOME" Environment Variable:
Open Windows Search, type in “env”, and choose “Edit the system environment variables”.
Click on "Environment Variables...".
Click on "New".
Fill in the "variable name" field with "JAVA_HOME".
Fill in the "variable value" with the path to where Java is installed on your computer. (for me it was under "C:\Program Files\Java\<jdkversion>".)
Click "OK" and close all dialogs.
Restart your IDE / Terminal. (do not skip this step)
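Alternatively, the variable can be set from a Command Prompt with the built-in setx command; the path below assumes the JDK was installed to its default folder, so adjust it to your version, and open a new terminal afterwards for it to take effect:
setx JAVA_HOME "C:\Program Files\Java\jdk-15"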
A:
On Ubuntu 20.04 with Android Studio 4.1, this error can be solved using the Snap version with:
flutter config --android-studio-dir=/snap/android-studio/current/android-studio
A:
For Windows :
flutter config --android-studio-dir="C:\Program Files\Android\Android Studio"
This command works fine if you have Android Studio installed. Flutter can understand the configurations of the Android Studio and you don't have to provide JAVA_HOME anymore.
Edit (Bonus) :
Also, download "Android SDK Command-line Tools" in android studio for android-licenses. Steps are shown in the image below.
A:
You need to install Java JDK software which is required for the android studio.
sudo apt install openjdk-11-jdk
A:
UPDATE: run flutter upgrade to upgrade to Flutter 1.22.2, which fixes compatibility issues with Android Studio 4.1. See related Flutter release.
This issue has been resolved.
See this issue regarding Android Studio 4.1 and Flutter
You can either downgrade back to Android Studio 4.0.2 or you can
install the latest JDK and update your JAVA_HOME variable and
PATH (OS dependent).
Note that if you choose to continue using 4.1 instead of downgrading,
flutter doctor will tell you Android Studio is not installed.
You need to run flutter config --android-studio-dir='<path to android studio>' in order to fix this issue. flutter doctor will now also
incorrectly report that the Flutter and Dart plugins are not
installed even if they are - you can ignore this.
A:
Install Android Studio (skip this step if you already installed)
Goto
C:\Program Files\Android\Android Studio\jre and copy the path.
Make New variable called "JAVA_HOME". And paste above path as the variable value.
A:
I set my JAVA HOME to the path,
/snap/android-studio/97/android-studio/jre/
Then I set my android studio config in flutter by using this command:
flutter config --android-studio-dir=/snap/android-studio/current/android-studio
My system is:
Ubuntu 20.04 with Flutter plugin version 4.1.
A:
In Mac:
In your terminal run
ECHO $JAVA_HOME
This will bring up the current path of JAVA_HOME. Pay close attention to the path that the terminal outputs. In my case I had System/Library instead of /Library/.../... as my path (a typo), so I simply corrected this in bash_profile.
If your path is correct but the error persists,
Right Click On Finder
Go To Folder
Bash_Profile
Add or Edit the Java Home Path
The path you add here should be the one your terminal output when you ran echo $JAVA_HOME. As long as the path that was echoed is correct, the steps below should fix the issue.
Then open your zshrc in a similar manner as you did with bash_profile and add the home path to the file.
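For example, assuming a standard JDK install, the line to add could look like this (the /usr/libexec/java_home helper ships with macOS and prints the active JDK path):
export JAVA_HOME=$(/usr/libexec/java_home)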
You can also edit the bash_profile and zshrc files in your text editor. These files can be shown using cmd + shift + . in your home folder.
After that, save, exit the terminal, close Android Studio, and restart your machine.
Read this Article to learn more.
A:
For Ubuntu 20.04 this solution will be appropriate:
This link solved my problem. In short, I would recommend following these simple steps:
Upgrade Flutter using flutter upgrade
Point Flutter at Android Studio: flutter config --android-studio-dir="/path/to/android/studio/"
Set the path in the bashrc file by opening it (it is a hidden file) and adding:
export JAVA_HOME="/path/to/android/studio/jre/"
A:
Install OpenJDK 8.
You have to restart your terminal; the issue will persist even after the download if you don't restart it.
Follow the React Native guide to install OpenJDK for your environment:
https://reactnative.dev/docs/environment-setup
A:
choco install javaruntime
If you use the choco package manager, it will set the vars automatically.
| ERROR: JAVA_HOME is not set and no 'java' command could be found in your flutter PATH. In Flutter | I installed Android Studio 4.1 and tried to run the existing project. But it gives an error like this:
ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation.
| [
"You have to set the JAVA_HOME Environment Variable.\nOn Windows, I solved the issue as follows:\n\nDownload the Java JDK from here and install it. (This links to version 15, which requires you to create an account in order to download. Version 16 is available to download without creating an account, but it has caused me an error that required me to downgrade.)\n\nSet the \"JAVA_HOME\" Environment Variable:\n\nOpen Windows Search, type in “env”, and choose “Edit the system environment variables”.\n\n\nClick on \"Environment Variables...\".\n\nClick on \"New\".\n\nFill in the \"variable name\" field with \"JAVA_HOME\".\n\nFill in the \"variable value\" with the path to where Java is installed on your computer. (for me it was under \"C:\\Program Files\\Java\\<jdkversion>\".)\n\n\nClick \"OK\" and close all dialogs.\n\n\n\nRestart your IDE / Terminal. (do not skip this step)\n\n\n",
"On Ubuntu 20.04 with Android 4.1, this error can be solved using the Snap version with:\nflutter config --android-studio-dir=/snap/android-studio/current/android-studio\n\n",
"For Windows :\nflutter config --android-studio-dir=\"C:\\Program Files\\Android\\Android Studio\"\n\nThis command works fine if you have Android Studio installed. Flutter can understand the configurations of the Android Studio and you don't have to provide JAVA_HOME anymore.\nEdit (Bonus) :\nAlso, download \"Android SDK Command-line Tools\" in android studio for android-licenses. Steps are shown in the image below.\n\n",
"You need to install Java JDK software which is required for the android studio.\nsudo apt install openjdk-11-jdk\n\n",
"UPDATE: run flutter upgrade to upgrade to Flutter 1.22.2, which fixes compatibility issues with Android Studio 4.1. See related Flutter release.\nThis issue has been resolved v\n\nSee this issue regarding Android Studio 4.1 and Flutter\nYou can either downgrade back to Android Studio 4.0.2 or you can\ninstall the latest JDK and update your JAVA_HOME variable and\nPATH (OS dependent).\nNote that if you choose to continue using 4.1 instead of downgrading,\nflutter doctor will tell you Android Studio is not installed.\nYou need to run flutter config --android-studio-dir='<path to android studio>' in order to fix this issue. flutter doctor will now also\nincorrectly report that the Flutter and Dart plugins are not\ninstalled even if they are - you can ignore this.\n\n",
"\nInstall Android Studio (skip this step if you already installed)\n\nGoto\nC:\\Program Files\\Android\\Android Studio\\jre and copy the path.\n\nMake New variable called \"JAVA_HOME\". And paste above path as the variable value.\n\n\n\n",
"I set my JAVA HOME to the path,\n/snap/android-studio/97/android-studio/jre/\nThen I set my android studio config in flutter by using this command:\nflutter config --android-studio-dir=/snap/android-studio/current/android-studio\nMy system is :\nUbuntu 20.04 with Flutter plugin version as 4.1,\n",
"In Mac:\nIn your terminal run\nECHO $JAVA_HOME\nThis will bring up the current path of java home.Pay close attention to the path that the terminal outputs.In my case i had System/Library instead of /Library/.../... typo as my path.So i simply corrected this in bash_profile.\nIf your path is correct but the error persists,\nRight Click On Finder\nGo To Folder\n\nBash_Profile\n\nAdd or Edit the Java Home Path\nThe path you add here should be the one your terminal output when you ran Echo $Java_Home..As long as this path that was echoed was/is correct..the below steps should fix the issue.\n\n\nThen open your zshrc in a similar manner as you did with bash_profile and add the home path to the file.\n\nYou can also edit the bash_profile and zshrc files in your text editor.These files can be displayed using cmd + shift + . in your home folder.\n\nAfter that,save,exit the terminal, close android studio and restart your machine.\nRead this Article to learn more.\n",
"For ubuntu 20.04 this solution will be appropriate: - \nThis link solved my problem. In short I would recommend to follow the simple steps, i.e.\n\nUpgrade the flutter using flutter upgrade\nSet the path of the flutter flutter config --android-studio-dir=\"/path/to/android/studio/\"\nSet the path in bashrc file by opening bashrc file(it is a hidden file)\nexport JAVA_HOME=\"/path/to/android/studio/jre/\"\n\n",
"Install openjdk-8 ....\nYou have to restart your terminal, the issue will persist even after download if you don't restart the terminal.\nFollow react native guide to download openjdk for your environment. ...\nhttps://reactnative.dev/docs/environment-setup\n",
"choco install javaruntime\n\nIf you use choco package manager. It will set the vars automatically.\n"
] | [
52,
25,
18,
13,
10,
5,
3,
3,
1,
0,
0
] | [
"Download Liberica JDK and on installation it will automatically set JAVA_HOME environment variable.\nNow you just go to Android Studio -> Project Structure -> SDK -> Point all jdk path to liberica jdk installation directory and apply.\nYou are done. Just restart android studio and gradle command will work.\n"
] | [
-1
] | [
"android",
"android_studio",
"flutter"
] | stackoverflow_0064359564_android_android_studio_flutter.txt |
Q:
Permutation algorithm C++
I am trying to translate an algorithm that generates all permutations of k out of n into C++:
public void calculerEquipeTOT(ArrayList<Nageur> L, ArrayList<Nageur> F, int k) {
if (k == 0) {
if (calculerPointsTOT(L) > this.pointsMeilleureEquipe){
this.meilleureEquipe = L;
this.pointsMeilleureEquipe = calculerPointsTOT(meilleureEquipe);
}
} else {
for (Nageur x : F) {
ArrayList<Nageur> G = new ArrayList<Nageur>(F);
G.remove(G.indexOf(x));
ArrayList<Nageur> L2 = new ArrayList<Nageur>(L);
L2.add(x);
calculerEquipeTOT(L2, G, k - 1);
}
}
}
My problem is that the lists could be lists of objects, and I don't know how to remove x from the copied list... I am not a C++ specialist; I managed it in Java, but I have to do it in C++.
A:
I have transliterated your function and I have gotten the following
#include <iostream>
#include <list>
#include <iterator>
void arrangements( std::list<char> l, std::list<char> f, size_t k )
{
if ( k == 0 )
{
for ( char c : l ) std::cout << c << ' ';
std::cout << std::endl;
}
else
{
for ( auto it = f.begin(); it != f.end(); ++it )
{
std::list<char> g( f.begin(), it );
g.insert( g.end(), std::next( it ), f.end() );
std::list<char> l2( l );
l2.push_back( *it );
arrangements( l2, g , k-1 );
}
}
}
int main()
{
std::list<char> f = { 'A', 'B', 'C', 'D' };
arrangements( std::list<char>(), f, 2 );
}
The program output is
A B
A C
A D
B A
B C
B D
C A
C B
C D
D A
D B
D C
I do not know whether it is what you want to get.
If to call the function with k equal to 3 then the program output will be
A B C
A B D
A C B
A C D
A D B
A D C
B A C
B A D
B C A
B C D
B D A
B D C
C A B
C A D
C B A
C B D
C D A
C D B
D A B
D A C
D B A
D B C
D C A
D C B
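If you would rather use std::vector (the closest match to ArrayList), the copy-and-remove step the question asks about can be written directly. A minimal sketch, with a placeholder Nageur struct standing in for the real class:
#include <iostream>
#include <string>
#include <vector>

// Placeholder for the real Nageur class from the question (assumption).
struct Nageur { std::string name; };

// Copy the pool f and erase the element at index pos from the copy,
// mirroring the Java `G.remove(G.indexOf(x))` step.
std::vector<Nageur> without(const std::vector<Nageur>& f, std::size_t pos)
{
    std::vector<Nageur> g(f);          // copy, like new ArrayList<>(F)
    g.erase(g.begin() + pos);          // remove the chosen element
    return g;
}

int main()
{
    std::vector<Nageur> f{ {"A"}, {"B"}, {"C"} };
    std::vector<Nageur> g = without(f, 1);
    for (const Nageur& n : g) std::cout << n.name << ' ';   // prints: A C
    std::cout << '\n';
}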
A:
I found a way to do what I want using next_permutation() from standard library and also another next_combination() from this article : http://www.codeguru.com/cpp/cpp/algorithms/combinations/article.php/c5117/Combinations-in-C.htm
My solution :
int main(int argc, const char * argv[]) {
cout << "Hello, World!\n";
int nb = 0;
int tab1[] = {0,1,2,3};
vector<int> n (tab1, tab1+sizeof tab1 / sizeof tab1[0]);
int tab2[] = {0,1};
vector<int> r (tab2, tab2+sizeof tab2 / sizeof tab2[0]);
sort (n.begin(), n.end());
do
{
sort(r.begin(),r.end());
//do your processing on the new combination here
vector<int> r2 = r;
do
{
//do your processing on the new permutation here
nb++;
display_vector(r2);
cout << endl;
}
while(next_permutation(r2.begin(),r2.end()));
}
while(next_combination(n.begin(),n.end(),r.begin(),r.end() ));
cout << "Number of possibilities = " << nb << endl;
return 0;
}
That displays :
Hello, World!
0 1
1 0
0 2
2 0
0 3
3 0
1 2
2 1
1 3
3 1
2 3
3 2
Number of possibilities = 12
It takes less than 1 s to find all permutations of 10 out of 12 on my computer... I don't know if this is a good algorithm, but it's faster than my previous algorithm in Java.
If someone sees how to improve and optimize it, I am interested! :)
A:
Call permute(nums), passing nums as a vector; it will return another vector containing all the permutations.
void getPermutation(vector<int>& nums,vector<int> current_indices, vector<vector<int>>& permutation)
{
if (nums.size()==current_indices.size())
{
vector <int> temp;
for(auto index:current_indices)
temp.push_back(nums[index]);
permutation.push_back(temp);
return;
}
vector <int> remaining_indices;
for (int i=0;i<nums.size();i++)
{
if(std::find(current_indices.begin(), current_indices.end(), i) != current_indices.end())
{
//do nothing
}
else
{
remaining_indices.push_back(i);
}
}
while (remaining_indices.size()>0)
{
vector<int> temp = current_indices;
temp.push_back(remaining_indices[0]);
getPermutation(nums,temp,permutation);
remaining_indices.erase(remaining_indices.begin());
}
return;
}
vector<vector<int>> permute(vector<int>& nums) {
vector<vector<int>> permutation;
vector<int> current_indices;
getPermutation( nums,current_indices, permutation);
return permutation;
}
| Permutation algorithm C++ | I try to translate an algorithm that generates all permutations of k out of n in C++ :
public void calculerEquipeTOT(ArrayList<Nageur> L, ArrayList<Nageur> F, int k) {
if (k == 0) {
if (calculerPointsTOT(L) > this.pointsMeilleureEquipe){
this.meilleureEquipe = L;
this.pointsMeilleureEquipe = calculerPointsTOT(meilleureEquipe);
}
} else {
for (Nageur x : F) {
ArrayList<Nageur> G = new ArrayList<Nageur>(F);
G.remove(G.indexOf(x));
ArrayList<Nageur> L2 = new ArrayList<Nageur>(L);
L2.add(x);
calculerEquipeTOT(L2, G, k - 1);
}
}
}
My problem is that the Lists could be Objects list and I don't know how to remove the x of the L2 list... I am not a C++ specialist, I managed it in Java but I have to do it in C++.
| [
"I have transliterated your function and I have gotten the following\n#include <iostream>\n#include <list>\n#include <iterator>\n\nvoid arrangements( std::list<char> l, std::list<char> f, size_t k ) \n{\n if ( k == 0 ) \n {\n for ( char c : l ) std::cout << c << ' ';\n std::cout << std::endl;\n } \n else \n {\n for ( auto it = f.begin(); it != f.end(); ++it )\n {\n std::list<char> g( f.begin(), it );\n g.insert( g.end(), std::next( it ), f.end() );\n\n std::list<char> l2( l );\n l2.push_back( *it );\n\n arrangements( l2, g , k-1 );\n }\n }\n}\n\nint main()\n{\n std::list<char> f = { 'A', 'B', 'C', 'D' };\n\n arrangements( std::list<char>(), f, 2 );\n}\n\nThe program output is\nA B \nA C \nA D \nB A \nB C \nB D \nC A \nC B \nC D \nD A \nD B \nD C \n\nI do not know whether it is what you want to get.\nIf to call the function with k equal to 3 then the program output will be\nA B C \nA B D \nA C B \nA C D \nA D B \nA D C \nB A C \nB A D \nB C A \nB C D \nB D A \nB D C \nC A B \nC A D \nC B A \nC B D \nC D A \nC D B \nD A B \nD A C \nD B A \nD B C \nD C A \nD C B \n\n",
"I found a way to do what I want using next_permutation() from standard library and also another next_combination() from this article : http://www.codeguru.com/cpp/cpp/algorithms/combinations/article.php/c5117/Combinations-in-C.htm\nMy solution : \nint main(int argc, const char * argv[]) {\n\n cout << \"Hello, World!\\n\";\n int nb = 0;\n\n int tab1[] = {0,1,2,3};\n vector<int> n (tab1, tab1+sizeof tab1 / sizeof tab1[0]);\n int tab2[] = {0,1};\n vector<int> r (tab2, tab2+sizeof tab2 / sizeof tab2[0]);\n\n sort (n.begin(), n.end());\n do\n {\n sort(r.begin(),r.end());\n //do your processing on the new combination here\n vector<int> r2 = r;\n do\n {\n //do your processing on the new permutation here\n nb++;\n display_vector(r2);\n cout << endl;\n }\n while(next_permutation(r2.begin(),r2.end()));\n }\n while(next_combination(n.begin(),n.end(),r.begin(),r.end() ));\n\n cout << \"Number of possibilities = \" << nb << endl;\n\n return 0;\n}\n\nThat displays :\nHello, World!\n0 1 \n1 0 \n0 2 \n2 0 \n0 3 \n3 0 \n1 2 \n2 1 \n1 3 \n3 1 \n2 3 \n3 2 \nNumber of possibilities = 12\n\nIt takes less than 1s to find all 10 permutations out of 12 on my computer... I don't know if this is good algorithm but it's faster than my previous algorithm in Java. \nIf someone see how to improve and optimize it I am interested ! :)\n",
"Call permute(nums) and pass nums as a vector that will return another vector containing all permuted numbers.\nvoid getPermutation(vector<int>& nums,vector<int> current_indices, vector<vector<int>>& permutation)\n {\n if (nums.size()==current_indices.size()) \n {\n vector <int> temp;\n for(auto index:current_indices)\n temp.push_back(nums[index]);\n permutation.push_back(temp);\n\n return;\n }\n\n vector <int> remaining_indices;\n for (int i=0;i<nums.size();i++)\n {\n if(std::find(current_indices.begin(), current_indices.end(), i) != current_indices.end()) \n {\n //do nothing\n } \n else \n {\n remaining_indices.push_back(i);\n }\n }\n\n while (remaining_indices.size()>0)\n {\n vector<int> temp = current_indices;\n temp.push_back(remaining_indices[0]);\n getPermutation(nums,temp,permutation);\n remaining_indices.erase(remaining_indices.begin());\n }\n\n return;\n\n }\n vector<vector<int>> permute(vector<int>& nums) {\n\n vector<vector<int>> permutation;\n vector<int> current_indices; \n getPermutation( nums,current_indices, permutation);\n \n return permutation;\n\n \n }\n\n"
] | [
3,
3,
0
] | [] | [] | [
"algorithm",
"c++",
"permutation"
] | stackoverflow_0030698788_algorithm_c++_permutation.txt |
Q:
Pinescript IF Statement Array
Hi, I am struggling to get my array in Pine Script to produce anything other than a list of NaN. I am trying to create an array of the % difference between the low and the 20 SMA when price bounces off the 20 SMA, but currently when I print the array it only has NaN values.
sma_20 = sma(close,20)
sma_20_touch_band = open>sma_20 and low<=sma_20
sma_20_dif = ((low-sma_20)/sma_20)
sma_20_array = array.new_float(100)
if sma_20_touch_band
array.push(sma_20_array, sma_20_dif)
array.shift(sma_20_array)
A:
That is most likely caused by not using a var array. Without the var keyword, your array will be re-initialized on each bar. You need to initialize your array once, and manipulate its elements later on. Therefore make it:
var sma_20_array = array.new_float(100)
Also, I'm not so sure about your usage of the array.shift() function.
You push something to the array, but with the array.shift() you remove the first element from the array. At the end of the day, you remove what you have just added. At least this is what I think is happening.
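Putting the two remarks together, a minimal sketch of a corrected script (Pine v4 syntax, matching the question; the shift is dropped so every bounce stays in the array):
//@version=4
study("sma20 bounce distances")
sma_20 = sma(close, 20)
sma_20_touch_band = open > sma_20 and low <= sma_20
sma_20_dif = (low - sma_20) / sma_20
var sma_20_array = array.new_float()   // initialized once, then grows
if sma_20_touch_band
    array.push(sma_20_array, sma_20_dif)
plot(array.size(sma_20_array))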
| Pinescript IF Statement Array | Hi i am struggling to get my array in Pinescript to produce anything other than a list of Nan. I am trying to create an array of the % difference of the low and 20sma when price bounces off the 20sma but currently when i print the array it only has Nan values.
sma_20 = sma(close,20)
sma_20_touch_band = open>sma_20 and low<=sma_20
sma_20_dif = ((low-sma_20)/sma_20)
sma_20_array = array.new_float(100)
if sma_20_touch_band
array.push(sma_20_array, sma_20_dif)
array.shift(sma_20_array)
| [
"That is most likely caused by not using a var array. Without the var keyword, your array will be re-initialized on each bar. You need to initialze your array once, and manipulate its elements later on. Therefore make it:\nvar sma_20_array = array.new_float(100)\n\nAlso, I'm not so sure about your usage of the array.shift() function.\nYou push something to the array, but with the array.shift() you remove the first element from the array. At the end of the day, you remove what you have just added. At least this is what I think is happening.\n"
] | [
0
] | [] | [] | [
"arrays",
"if_statement",
"pine_script"
] | stackoverflow_0074674685_arrays_if_statement_pine_script.txt |
Q:
Call an async function with javascript
I am trying to call an async function but I am getting an error
getUsersList(db).then is not a function
this is my code
async function getUsersList(db) {
const userCol = collection(db, 'Users');
const userSnapshot = await getDocs(userCol);
const tempUserList = userSnapshot.docs.map(doc => doc.data());
return tempUserList;
}
function App() {
const app = initializeApp(firebaseConfig);
const db = getFirestore(app);
const auth = getAuth(app);
var currentUser = auth.currentUser;
if(currentUser != null){
getUsersList(db).then((value) => {
console.log(value);
});
I also tried using await getUsersList but got the following error
Unexpected reserved word 'await'
A:
So what I assume you're trying to do with const userSnapshot = await getDocs(userCol); is fetch some data that is then going to be used in your React component to render something (maybe there's a fetch request in getDocs?).
There is no return in your React component (but that's not what's causing your issue).
As it is, it won't work using await since App() isn't an async function, BUT you can't make it one since this is a standard React component.
What do you want to happen whilst waiting for the data to be fetched (= whilst your promise is pending)? If you're happy to display nothing, why not just remove the await before getDocs()? A sketch of the usual useEffect pattern follows below.
For more on this topic :
React: async and await not working with fetch
React Hooks: how to wait for the data to be fetched before rendering
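One common pattern is to kick off the fetch from useEffect and render from state once it resolves. A minimal sketch, reusing getUsersList, db and the Firebase setup from the question (the name field is just an assumed example property of your user documents):
import { useEffect, useState } from 'react';

// app / db / auth created once, outside the component (same setup lines as in the question)
const app = initializeApp(firebaseConfig);
const db = getFirestore(app);

function App() {
  const [users, setUsers] = useState([]);

  useEffect(() => {
    // runs after the first render; the component re-renders with the data once the promise resolves
    getUsersList(db).then((value) => setUsers(value));
  }, []);

  return (
    <ul>
      {users.map((user, i) => (
        <li key={i}>{user.name}</li>
      ))}
    </ul>
  );
}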
| Call an async function with javascript | I am trying to call an async function but I am getting an error
getUsersList(db).then is not a function
this is my code
async function getUsersList(db) {
const userCol = collection(db, 'Users');
const userSnapshot = await getDocs(userCol);
const tempUserList = userSnapshot.docs.map(doc => doc.data());
return tempUserList;
}
function App() {
const app = initializeApp(firebaseConfig);
const db = getFirestore(app);
const auth = getAuth(app);
var currentUser = auth.currentUser;
if(currentUser != null){
getUsersList(db).then((value) => {
console.log(value);
});
I also tried using await getUsersList but got the following error
Unexpected reserved word 'await'
| [
"So what I assume you're trying to do here const userSnapshot = await getDocs(userCol); is fetch some data that is then going to be used in your react component to render something (Maybe there's a fetch request in getDocs ?)\n\nThere is no return in your react component (but that's not what's causing your issue).\n\nAs it is it won't work using await since App() isn't an async function BUT you can't make it an async function since this is a standard react component.\n\nWhat do you want to happen whilst waiting for the data to be fetched (= whilst your promise is pending) ? If you're happy to display nothing, why not just remove await before getDocs() ?\n\n\nFor more on this topic :\nReact: async and await not working with fetch\nReact Hooks: how to wait for the data to be fetched before rendering\n"
] | [
0
] | [] | [] | [
"javascript",
"reactjs"
] | stackoverflow_0074675045_javascript_reactjs.txt |
Q:
Why does left_join produces NA values
I have these data frames and I want to merge them with left_join, based on the peak column.
However, whenever I try, I get NA values.
Can you help me understand why?
library(tidyverse)
df1 <- tibble(peak=c("peak1","peak2","peak3"),
coord1=c(100,500,1000),
coord2=c(250,700,1250))
df1
#> # A tibble: 3 × 3
#> peak coord1 coord2
#> <chr> <dbl> <dbl>
#> 1 peak1 100 250
#> 2 peak2 500 700
#> 3 peak3 1000 1250
df2 <- tibble(peak=c("peak5","peak6","peak7"),
coord1=c(120,280,600),
coord2=c(300,400,850))
df2
#> # A tibble: 3 × 3
#> peak coord1 coord2
#> <chr> <dbl> <dbl>
#> 1 peak5 120 300
#> 2 peak6 280 400
#> 3 peak7 600 850
dplyr::left_join(df1, df2, by="peak")
#> # A tibble: 3 × 5
#> peak coord1.x coord2.x coord1.y coord2.y
#> <chr> <dbl> <dbl> <dbl> <dbl>
#> 1 peak1 100 250 NA NA
#> 2 peak2 500 700 NA NA
#> 3 peak3 1000 1250 NA NA
Created on 2022-12-04 with reprex v2.0.2
A:
Assuming that your data is the same as in the previous question.
data <- list(df1, df2, df3)
> data
[[1]]
# A tibble: 3 × 3
peak coord1 coord2
<chr> <dbl> <dbl>
1 peak1 100 250
2 peak2 500 700
3 peak3 1000 1250
[[2]]
# A tibble: 3 × 3
peak coord1 coord2
<chr> <dbl> <dbl>
1 peak5 120 300
2 peak6 280 400
3 peak7 900 1850
[[3]]
# A tibble: 3 × 3
peak coord1 coord2
<chr> <dbl> <dbl>
1 peak8 900 2000
2 peak9 3000 3400
3 peak10 5600 5850
map(data, ~ .x %>%
mutate(peak = str_c("peak", 1:nrow(.)))) %>%
reduce(left_join, by = "peak")
# A tibble: 3 × 7
peak coord1.x coord2.x coord1.y coord2.y coord1 coord2
<chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
1 peak1 100 250 120 300 900 2000
2 peak2 500 700 280 400 3000 3400
3 peak3 1000 1250 900 1850 5600 5850
| Why does left_join produces NA values | I have these data frames and I want to merge them with left_join, based on the peak column.
However, any time that I am trying I am taking NA values
can you help me why?
library(tidyverse)
df1 <- tibble(peak=c("peak1","peak2","peak3"),
coord1=c(100,500,1000),
coord2=c(250,700,1250))
df1
#> # A tibble: 3 × 3
#> peak coord1 coord2
#> <chr> <dbl> <dbl>
#> 1 peak1 100 250
#> 2 peak2 500 700
#> 3 peak3 1000 1250
df2 <- tibble(peak=c("peak5","peak6","peak7"),
coord1=c(120,280,600),
coord2=c(300,400,850))
df2
#> # A tibble: 3 × 3
#> peak coord1 coord2
#> <chr> <dbl> <dbl>
#> 1 peak5 120 300
#> 2 peak6 280 400
#> 3 peak7 600 850
dplyr::left_join(df1, df2, by="peak")
#> # A tibble: 3 × 5
#> peak coord1.x coord2.x coord1.y coord2.y
#> <chr> <dbl> <dbl> <dbl> <dbl>
#> 1 peak1 100 250 NA NA
#> 2 peak2 500 700 NA NA
#> 3 peak3 1000 1250 NA NA
Created on 2022-12-04 with reprex v2.0.2
| [
"Assuming that your data is the same as the previous question.\ndata <- list(df1, df2, df3)\n\n> data\n[[1]]\n# A tibble: 3 × 3\n peak coord1 coord2\n <chr> <dbl> <dbl>\n1 peak1 100 250\n2 peak2 500 700\n3 peak3 1000 1250\n\n[[2]]\n# A tibble: 3 × 3\n peak coord1 coord2\n <chr> <dbl> <dbl>\n1 peak5 120 300\n2 peak6 280 400\n3 peak7 900 1850\n\n[[3]]\n# A tibble: 3 × 3\n peak coord1 coord2\n <chr> <dbl> <dbl>\n1 peak8 900 2000\n2 peak9 3000 3400\n3 peak10 5600 5850\n\nmap(data, ~ .x %>%\n mutate(peak = str_c(\"peak\", 1:nrow(.)))) %>%\n reduce(left_join, by = \"peak\")\n\n# A tibble: 3 × 7\n peak coord1.x coord2.x coord1.y coord2.y coord1 coord2\n <chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>\n1 peak1 100 250 120 300 900 2000\n2 peak2 500 700 280 400 3000 3400\n3 peak3 1000 1250 900 1850 5600 5850\n\n"
] | [
2
] | [] | [] | [
"dplyr",
"r"
] | stackoverflow_0074675143_dplyr_r.txt |
Q:
Parse String to Date with Different Format in Java
I want to convert String to Date in different formats.
For example,
I am getting from user,
String fromDate = "19/05/2009"; // i.e. (dd/MM/yyyy) format
I want to convert this fromDate to a Date object in "yyyy-MM-dd" format
How can I do this?
A:
Take a look at SimpleDateFormat. The code goes something like this:
SimpleDateFormat fromUser = new SimpleDateFormat("dd/MM/yyyy");
SimpleDateFormat myFormat = new SimpleDateFormat("yyyy-MM-dd");
try {
String reformattedStr = myFormat.format(fromUser.parse(inputString));
} catch (ParseException e) {
e.printStackTrace();
}
A:
tl;dr
LocalDate.parse(
"19/05/2009" ,
DateTimeFormatter.ofPattern( "dd/MM/uuuu" )
)
Details
The other Answers with java.util.Date, java.sql.Date, and SimpleDateFormat are now outdated.
LocalDate
The modern way to do date-time is work with the java.time classes, specifically LocalDate. The LocalDate class represents a date-only value without time-of-day and without time zone.
DateTimeFormatter
To parse, or generate, a String representing a date-time value, use the DateTimeFormatter class.
DateTimeFormatter f = DateTimeFormatter.ofPattern( "dd/MM/uuuu" );
LocalDate ld = LocalDate.parse( "19/05/2009" , f );
Do not conflate a date-time object with a String representing its value. A date-time object has no format, while a String does. A date-time object, such as LocalDate, can generate a String to represent its internal value, but the date-time object and the String are separate distinct objects.
You can specify any custom format to generate a String. Or let java.time do the work of automatically localizing.
DateTimeFormatter f =
DateTimeFormatter.ofLocalizedDate( FormatStyle.FULL )
.withLocale( Locale.CANADA_FRENCH ) ;
String output = ld.format( f );
Dump to console.
System.out.println( "ld: " + ld + " | output: " + output );
ld: 2009-05-19 | output: mardi 19 mai 2009
See in action in IdeOne.com.
About java.time
The java.time framework is built into Java 8 and later. These classes supplant the troublesome old legacy date-time classes such as java.util.Date, Calendar, & SimpleDateFormat.
The Joda-Time project, now in maintenance mode, advises migration to the java.time classes.
To learn more, see the Oracle Tutorial. And search Stack Overflow for many examples and explanations. Specification is JSR 310.
You may exchange java.time objects directly with your database. Use a JDBC driver compliant with JDBC 4.2 or later. No need for strings, no need for java.sql.* classes.
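For example, a minimal sketch (the events table, its happened_on DATE column, and the Connection are hypothetical):
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;

class JdbcLocalDateSketch {
    static void writeAndRead(Connection conn) throws Exception {
        LocalDate ld = LocalDate.parse("19/05/2009", DateTimeFormatter.ofPattern("dd/MM/uuuu"));

        try (PreparedStatement ps = conn.prepareStatement("INSERT INTO events (happened_on) VALUES (?)")) {
            ps.setObject(1, ld);                     // pass the LocalDate directly, no java.sql.Date
            ps.executeUpdate();
        }

        try (PreparedStatement ps = conn.prepareStatement("SELECT happened_on FROM events");
             ResultSet rs = ps.executeQuery()) {
            while (rs.next()) {
                LocalDate fromDb = rs.getObject("happened_on", LocalDate.class);
                System.out.println(fromDb);          // e.g. 2009-05-19
            }
        }
    }
}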
Where to obtain the java.time classes?
Java SE 8, Java SE 9, and later
Built-in.
Part of the standard Java API with a bundled implementation.
Java 9 adds some minor features and fixes.
Java SE 6 and Java SE 7
Much of the java.time functionality is back-ported to Java 6 & 7 in ThreeTen-Backport.
Android
Later versions of Android bundle implementations of the java.time classes.
For earlier Android (<26), the ThreeTenABP project adapts ThreeTen-Backport (mentioned above). See How to use ThreeTenABP….
The ThreeTen-Extra project extends java.time with additional classes. This project is a proving ground for possible future additions to java.time. You may find some useful classes here such as Interval, YearWeek, YearQuarter, and more.
A:
Use the SimpleDateFormat class:
private Date parseDate(String date, String format) throws ParseException
{
SimpleDateFormat formatter = new SimpleDateFormat(format);
return formatter.parse(date);
}
Usage:
Date date = parseDate("19/05/2009", "dd/MM/yyyy");
For efficiency, you would want to store your formatters in a hashmap. The hashmap is a static member of your util class.
private static Map<String, SimpleDateFormat> hashFormatters = new HashMap<String, SimpleDateFormat>();
public static Date parseDate(String date, String format) throws ParseException
{
SimpleDateFormat formatter = hashFormatters.get(format);
if (formatter == null)
{
formatter = new SimpleDateFormat(format);
hashFormatters.put(format, formatter);
}
return formatter.parse(date);
}
A:
Convert a string date to java.sql.Date
String fromDate = "19/05/2009";
DateFormat df = new SimpleDateFormat("dd/MM/yyyy");
java.util.Date dtt = df.parse(fromDate);
java.sql.Date ds = new java.sql.Date(dtt.getTime());
System.out.println(ds); // 2009-05-19
A:
Check the javadocs for java.text.SimpleDateFormat. It describes everything you need.
A:
While SimpleDateFormat will indeed work for your needs, additionally you might want to check out Joda-Time, which is the basis for the redone date library that eventually shipped as java.time in Java 8. While I haven't used it a lot, I've heard nothing but good things about it, and if you're manipulating dates extensively in your projects it would probably be worth looking into.
A:
Simple way to format a date and convert into string
Date date= new Date();
String dateStr=String.format("%td/%tm/%tY", date,date,date);
 System.out.println("Date with format of dd/mm/yyyy: "+dateStr);
output: Date with format of dd/mm/yyyy: 21/10/2015
A:
Suppose that you have a string like this :
String mDate="2019-09-17T10:56:07.827088"
Now we want to parse this String and extract the date and the time separately, in Java and Kotlin.
JAVA:
we have a method to extract the date:
public String getDate() {
try {
DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS", Locale.US);
Date date = dateFormat.parse(mDate);
dateFormat = new SimpleDateFormat("MM/dd/yyyy", Locale.US);
return dateFormat.format(date);
} catch (ParseException e) {
e.printStackTrace();
}
return null;
}
Return is this : 09/17/2019
And we have a method to extract the time:
public String getTime() {
try {
DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS", Locale.US);
        Date date = dateFormat.parse(mDate);
dateFormat = new SimpleDateFormat("h:mm a", Locale.US);
return dateFormat.format(date);
} catch (ParseException e) {
e.printStackTrace();
}
return null;
}
Return is this : 10:56 AM
KOTLIN:
we have a function to extract the date:
fun getDate(): String? {
var dateFormat = SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS", Locale.US)
val date = dateFormat.parse(mDate!!)
dateFormat = SimpleDateFormat("MM/dd/yyyy", Locale.US)
return dateFormat.format(date!!)
}
Return is this : 09/17/2019
And we have a function to extract the time:
fun getTime(): String {
var dateFormat = SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS", Locale.US)
val time = dateFormat.parse(mDate!!)
dateFormat = SimpleDateFormat("h:mm a", Locale.US)
return dateFormat.format(time!!)
}
Return is this : 10:56 AM
A:
A Date object has no format, it is a representation. The date can be presented by a String with the format you like.
E.g. "yyyy-MM-dd", "yy-MMM-dd", "dd-MMM-yy" and etc.
To achieve this you can use SimpleDateFormat.
Try this,
String inputString = "19/05/2009"; // i.e. (dd/MM/yyyy) format
SimpleDateFormat fromUser = new SimpleDateFormat("dd/MM/yyyy");
SimpleDateFormat myFormat = new SimpleDateFormat("yyyy-MM-dd");
try {
    Date dateFromUser = fromUser.parse(inputString); // Parse it with the existing date pattern and return a Date
String dateMyFormat = myFormat.format(dateFromUser); // format it to the date pattern you prefer
System.out.println(dateMyFormat); // outputs : 2009-05-19
} catch (ParseException e) {
e.printStackTrace();
}
This outputs : 2009-05-19
A:
There are multiple ways to do it, but a very practical one is String.format, which you can use with java.util.Date, java.util.Calendar or even java.time.LocalDate.
String.format is backed by java.util.Formatter.
I like the omnivore take on it.
class Playground {
public static void main(String[ ] args) {
String formatString = "Created on %1$td/%1$tm/%1$tY%n";
System.out.println(String.format(formatString, new java.util.Date()));
System.out.println(String.format(formatString, java.util.Calendar.getInstance()));
System.out.println(String.format(formatString, java.time.LocalDate.now()));
}
}
The output will be in all cases:
Created on 04/12/2022
Created on 04/12/2022
Created on 04/12/2022
| Parse String to Date with Different Format in Java | I want to convert String to Date in different formats.
For example,
I am getting from user,
String fromDate = "19/05/2009"; // i.e. (dd/MM/yyyy) format
I want to convert this fromDate as a Date object of "yyyy-MM-dd" format
How can I do this?
| [
"Take a look at SimpleDateFormat. The code goes something like this:\nSimpleDateFormat fromUser = new SimpleDateFormat(\"dd/MM/yyyy\");\nSimpleDateFormat myFormat = new SimpleDateFormat(\"yyyy-MM-dd\");\n\ntry {\n\n String reformattedStr = myFormat.format(fromUser.parse(inputString));\n} catch (ParseException e) {\n e.printStackTrace();\n}\n\n",
"tl;dr\nLocalDate.parse( \n \"19/05/2009\" , \n DateTimeFormatter.ofPattern( \"dd/MM/uuuu\" ) \n)\n\nDetails\nThe other Answers with java.util.Date, java.sql.Date, and SimpleDateFormat are now outdated.\nLocalDate\nThe modern way to do date-time is work with the java.time classes, specifically LocalDate. The LocalDate class represents a date-only value without time-of-day and without time zone.\nDateTimeFormatter\nTo parse, or generate, a String representing a date-time value, use the DateTimeFormatter class.\nDateTimeFormatter f = DateTimeFormatter.ofPattern( \"dd/MM/uuuu\" );\nLocalDate ld = LocalDate.parse( \"19/05/2009\" , f );\n\nDo not conflate a date-time object with a String representing its value. A date-time object has no format, while a String does. A date-time object, such as LocalDate, can generate a String to represent its internal value, but the date-time object and the String are separate distinct objects.\nYou can specify any custom format to generate a String. Or let java.time do the work of automatically localizing.\nDateTimeFormatter f = \n DateTimeFormatter.ofLocalizedDate( FormatStyle.FULL )\n .withLocale( Locale.CANADA_FRENCH ) ;\nString output = ld.format( f );\n\nDump to console.\nSystem.out.println( \"ld: \" + ld + \" | output: \" + output );\n\n\nld: 2009-05-19 | output: mardi 19 mai 2009\n\nSee in action in IdeOne.com.\n\nAbout java.time\nThe java.time framework is built into Java 8 and later. These classes supplant the troublesome old legacy date-time classes such as java.util.Date, Calendar, & SimpleDateFormat.\nThe Joda-Time project, now in maintenance mode, advises migration to the java.time classes.\nTo learn more, see the Oracle Tutorial. And search Stack Overflow for many examples and explanations. Specification is JSR 310.\nYou may exchange java.time objects directly with your database. Use a JDBC driver compliant with JDBC 4.2 or later. No need for strings, no need for java.sql.* classes.\nWhere to obtain the java.time classes? \n\nJava SE 8, Java SE 9, and later\n\n\nBuilt-in. \nPart of the standard Java API with a bundled implementation.\nJava 9 adds some minor features and fixes.\n\nJava SE 6 and Java SE 7\n\nMuch of the java.time functionality is back-ported to Java 6 & 7 in ThreeTen-Backport.\n\nAndroid\n\nLater versions of Android bundle implementations of the java.time classes.\nFor earlier Android (<26), the ThreeTenABP project adapts ThreeTen-Backport (mentioned above). See How to use ThreeTenABP….\n\n\nThe ThreeTen-Extra project extends java.time with additional classes. This project is a proving ground for possible future additions to java.time. You may find some useful classes here such as Interval, YearWeek, YearQuarter, and more.\n",
"Use the SimpleDateFormat class:\nprivate Date parseDate(String date, String format) throws ParseException\n{\n SimpleDateFormat formatter = new SimpleDateFormat(format);\n return formatter.parse(date);\n}\n\nUsage:\nDate date = parseDate(\"19/05/2009\", \"dd/MM/yyyy\");\n\nFor efficiency, you would want to store your formatters in a hashmap. The hashmap is a static member of your util class.\nprivate static Map<String, SimpleDateFormat> hashFormatters = new HashMap<String, SimpleDateFormat>();\n\npublic static Date parseDate(String date, String format) throws ParseException\n{\n SimpleDateFormat formatter = hashFormatters.get(format);\n\n if (formatter == null)\n {\n formatter = new SimpleDateFormat(format);\n hashFormatters.put(format, formatter);\n }\n\n return formatter.parse(date);\n}\n\n",
"Convert a string date to java.sql.Date\nString fromDate = \"19/05/2009\";\nDateFormat df = new SimpleDateFormat(\"dd/MM/yyyy\");\njava.util.Date dtt = df.parse(fromDate);\njava.sql.Date ds = new java.sql.Date(dtt.getTime());\nSystem.out.println(ds);//Mon Jul 05 00:00:00 IST 2010\n\n",
"Check the javadocs for java.text.SimpleDateFormat It describes everything you need.\n",
"While SimpleDateFormat will indeed work for your needs, additionally you might want to check out Joda Time, which is apparently the basis for the redone Date library in Java 7. While I haven't used it a lot, I've heard nothing but good things about it and if your manipulating dates extensively in your projects it would probably be worth looking into.\n",
"Simple way to format a date and convert into string\n Date date= new Date();\n\n String dateStr=String.format(\"%td/%tm/%tY\", date,date,date);\n\n System.out.println(\"Date with format of dd/mm/dd: \"+dateStr);\n\n\noutput:Date with format of dd/mm/dd: 21/10/2015\n\n",
"Suppose that you have a string like this :\nString mDate=\"2019-09-17T10:56:07.827088\"\n\nNow we want to change this String format separate date and time in Java and Kotlin.\nJAVA:\nwe have a method for extract date :\npublic String getDate() {\n try {\n DateFormat dateFormat = new SimpleDateFormat(\"yyyy-MM-dd'T'HH:mm:ss.SSS\", Locale.US);\n Date date = dateFormat.parse(mDate);\n dateFormat = new SimpleDateFormat(\"MM/dd/yyyy\", Locale.US);\n return dateFormat.format(date);\n } catch (ParseException e) {\n e.printStackTrace();\n }\n return null;\n}\n\nReturn is this : 09/17/2019\nAnd we have method for extract time :\npublic String getTime() {\n\n try {\n DateFormat dateFormat = new SimpleDateFormat(\"yyyy-MM-dd'T'HH:mm:ss.SSS\", Locale.US);\n Date date = dateFormat.parse(mCreatedAt);\n dateFormat = new SimpleDateFormat(\"h:mm a\", Locale.US);\n return dateFormat.format(date);\n } catch (ParseException e) {\n e.printStackTrace();\n }\n return null;\n}\n\nReturn is this : 10:56 AM\nKOTLIN:\nwe have a function for extract date :\nfun getDate(): String? {\n\n var dateFormat = SimpleDateFormat(\"yyyy-MM-dd'T'HH:mm:ss.SSS\", Locale.US)\n val date = dateFormat.parse(mDate!!)\n dateFormat = SimpleDateFormat(\"MM/dd/yyyy\", Locale.US)\n return dateFormat.format(date!!)\n}\n\nReturn is this : 09/17/2019\nAnd we have method for extract time :\nfun getTime(): String {\n\n var dateFormat = SimpleDateFormat(\"yyyy-MM-dd'T'HH:mm:ss.SSS\", Locale.US)\n val time = dateFormat.parse(mDate!!)\n dateFormat = SimpleDateFormat(\"h:mm a\", Locale.US)\n return dateFormat.format(time!!)\n}\n\nReturn is this : 10:56 AM\n",
"A Date object has no format, it is a representation. The date can be presented by a String with the format you like. \nE.g. \"yyyy-MM-dd\", \"yy-MMM-dd\", \"dd-MMM-yy\" and etc.\nTo acheive this you can get the use of the SimpleDateFormat\nTry this,\n String inputString = \"19/05/2009\"; // i.e. (dd/MM/yyyy) format\n\n SimpleDateFormat fromUser = new SimpleDateFormat(\"dd/MM/yyyy\"); \n SimpleDateFormat myFormat = new SimpleDateFormat(\"yyyy-MM-dd\");\n\n try {\n Date dateFromUser = fromUser.parse(inputString); // Parse it to the exisitng date pattern and return Date type\n String dateMyFormat = myFormat.format(dateFromUser); // format it to the date pattern you prefer\n System.out.println(dateMyFormat); // outputs : 2009-05-19\n\n } catch (ParseException e) {\n e.printStackTrace();\n }\n\nThis outputs : 2009-05-19\n",
"There are multiple ways to do it, but a very practical one is the use String.format which you can use with java.util.Date or java.util.Calendar or event java.time.LocalDate.\nString.format is backed by java.util.Formatter.\nI like the omnivore take on it.\nclass Playground {\n public static void main(String[ ] args) {\n String formatString = \"Created on %1$td/%1$tm/%1$tY%n\";\n System.out.println(String.format(formatString, new java.util.Date()));\n System.out.println(String.format(formatString, java.util.Calendar.getInstance()));\n System.out.println(String.format(formatString, java.time.LocalDate.now()));\n }\n}\n\nThe output will be in all cases:\nCreated on 04/12/2022\n\nCreated on 04/12/2022\n\nCreated on 04/12/2022\n\n"
] | [
192,
17,
13,
8,
5,
3,
2,
2,
1,
0
] | [] | [] | [
"date",
"java",
"string"
] | stackoverflow_0000882420_date_java_string.txt |
Q:
Time complexity of O(NlogN)
What will be time complexity of this snippet?
`
for(int i=0; i<n;i++){
for (int j=0; j<n/2;j++){
}
}
`
A:
The time complexity of the given code snippet is O(n^2). This is because the outer for loop runs n times, and the inner for loop runs n/2 times for each iteration of the outer for loop. Thus, the total number of iterations is n * (n/2), which is equivalent to n^2.
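A quick way to convince yourself is to count the iterations for a few values of n; a small C++ sketch (the count comes out to n * (n / 2), which grows quadratically):
#include <iostream>

int main()
{
    for (int n : {10, 100, 1000}) {
        long long count = 0;
        for (int i = 0; i < n; i++)
            for (int j = 0; j < n / 2; j++)
                count++;
        // count equals n * (n / 2), so it scales with n^2
        std::cout << "n = " << n << ", iterations = " << count << '\n';
    }
}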
| Time complexity of O(NlogN) | What will be time complexity of this snippet?
`
for(int i=0; i<n;i++){
for (int j=0; j<n/2;j++){
}
}
`
| [
"The time complexity of the given code snippet is O(n^2). This is because the outer for loop runs n times, and the inner for loop runs n/2 times for each iteration of the outer for loop. Thus, the total number of iterations is n * (n/2), which is equivalent to n^2.\n"
] | [
0
] | [] | [] | [
"structure",
"time",
"time_complexity"
] | stackoverflow_0074607792_structure_time_time_complexity.txt |
Q:
How can I Fix The Problem at my showStudent function at my C++ code?
I have to use void showStudent(struct student st); to create showStudent for my code, and I want the program to appear like this.
Program Appearance
But I have an error messages at complier that I don't know how to fix them.
Error messages
Does anyone know how to fix it?
Here's my code, Thanks for advice.
`
#include <stdio.h>
#include <string.h>
typedef struct {
char stdcode[10];
char Name[60];
float gpa;
} student;
void showStudent(struct student st);
int main() {
student stds[4];
int i;
int n = 4;
printf("Enter new data\n");
for (i = 0; i < n; i++)
{
printf("Student%d\n", i + 1);
printf("Code: ");
gets_s(stds[i].stdcode);
printf("Name: ");
gets_s(stds[i].Name);
printf("GPA: ");
scanf_s("%f", &stds[i].gpa);
getchar();
}
showStudent(stds, n); return 0;
}
void showStudent(struct student st);
{
int i, n;
printf("\nAll Students\n");
for (i = 0; i < n; i++)
{
printf("%s %-15sGPA= %f\n", stds[i].stdcode, stds[i].Name, stds[i].gpa);
}
}
`
I want to fix my showStudent function. I want the program to appear as in the picture below.
[screenshot of the desired program output]
A:
You're using C headers and functions in C++ code; not all of your C code will work in C++. I've modified the code, hope it will be helpful.
Header Files
#include <iostream>
#include <cstdio>
using namespace std;
Student structure
struct Student
{
string stdcode;
string Name;
float gpa;
};
void showStudent(Student *stds, int n);
main function
changed the char[] arrays to string,
printf to cout,
and gets_s to cin with getline.
int main()
{
Student *stds;
stds = new Student[4];
int i;
int n = 4;
cout << "Enter new data" << endl;
for (i = 0; i < n; i++)
{
string code;
string name;
float gpa;
cout << "\nStudent" << i + 1 << endl;
cout << "Code: ";
getline(cin, code);
cout << "Name: ";
getline(cin, name);
cout << "GPA: ";
cin >> gpa;
string temp;
getline(cin, temp);
Student student = {code, name, gpa};
stds[i] = student;
}
showStudent(stds, n);
return 0;
}
Definition of showStudent
you passed 2 arguments to the function when calling it, but the function declaration had only a single parameter, so I changed it...
void showStudent(Student *stds, int n)
{
cout << "\nAll Students\n";
for (int i = 0; i < n; i++)
cout << stds[i].stdcode << " " << stds[i].Name << " GPA " << stds[i].gpa << endl;
}
output
Enter new data
Student1
Code: 101
Name: Sidharth Mudgil
GPA: 9.5
Student2
Code: 102
Name: Prateek Grag
GPA: 8.5
Student3
Code: 103
Name: Jai Karan
GPA: 6.2
Student4
Code: 104
Name: Kush Yadav
GPA: 7.6
All Students
101 Sidharth Mudgil GPA 9.5
102 Prateek Grag GPA 8.5
103 Jai Karan GPA 6.2
104 Kush Yadav GPA 7.6
| How can I Fix The Problem at my showStudent function at my C++ code? | I have to use void showStudent(struct student st); for create showStudent for my code, so I want the program appeared like this.
Program Appearance
But I have an error messages at complier that I don't know how to fix them.
Error messages
Does anyone know how to fix it?
Here's my code, Thanks for advice.
`
#include <stdio.h>
#include <string.h>
typedef struct {
char stdcode[10];
char Name[60];
float gpa;
} student;
void showStudent(struct student st);
int main() {
student stds[4];
int i;
int n = 4;
printf("Enter new data\n");
for (i = 0; i < n; i++)
{
printf("Student%d\n", i + 1);
printf("Code: ");
gets_s(stds[i].stdcode);
printf("Name: ");
gets_s(stds[i].Name);
printf("GPA: ");
scanf_s("%f", &stds[i].gpa);
getchar();
}
showStudent(stds, n); return 0;
}
void showStudent(struct student st);
{
int i, n;
printf("\nAll Students\n");
for (i = 0; i < n; i++)
{
printf("%s %-15sGPA= %f\n", stds[i].stdcode, stds[i].Name, stds[i].gpa);
}
}
`
I want to fix my showStudent function. I want the program appear in picture below.
enter image description here
| [
"You're using c headers and functions in c++ code, It is not possible that all of your c code will work in CPP. I've modified the code, hope it will be helpful\nHeader Files\n#include <iostream>\n#include <cstdio>\n\nusing namespace std;\n\nStudent structure\nstruct Student\n{\n string stdcode;\n string Name;\n float gpa;\n};\n\nvoid showStudent(Student *stds, int n);\n\nmain function\n\nchanged char[] sequence to string,\nprintf to cout,\nand get_s to cin & getline.\n\nint main()\n{\n Student *stds;\n stds = new Student[4];\n int i;\n int n = 4;\n cout << \"Enter new data\" << endl;\n\n for (i = 0; i < n; i++)\n {\n string code;\n string name;\n float gpa;\n\n cout << \"\\nStudent\" << i + 1 << endl;\n cout << \"Code: \";\n getline(cin, code);\n\n cout << \"Name: \";\n getline(cin, name);\n\n cout << \"GPA: \";\n cin >> gpa;\n\n string temp;\n getline(cin, temp);\n\n Student student = {code, name, gpa};\n stds[i] = student;\n }\n\n showStudent(stds, n);\n\n return 0;\n}\n\nDefinition of showStudent\n\nyou've passed 2 arguments to the function during calls, but the function definition has only a single parameter so I changed it...\n\nvoid showStudent(Student *stds, int n)\n{\n cout << \"\\nAll Students\\n\";\n for (int i = 0; i < n; i++)\n cout << stds[i].stdcode << \" \" << stds[i].Name << \" GPA \" << stds[i].gpa << endl;\n}\n\noutput\nEnter new data\n\nStudent1 \nCode: 101\nName: Sidharth Mudgil\nGPA: 9.5\n\nStudent2\nCode: 102\nName: Prateek Grag\nGPA: 8.5\n\nStudent3\nCode: 103\nName: Jai Karan\nGPA: 6.2\n\nStudent4\nCode: 104\nName: Kush Yadav\nGPA: 7.6\n\nAll Students\n101 Sidharth Mudgil GPA 9.5\n102 Prateek Grag GPA 8.5\n103 Jai Karan GPA 6.2\n104 Kush Yadav GPA 7.6\n\n"
] | [
0
] | [] | [] | [
"c++",
"function",
"struct",
"typedef",
"visual_c++"
] | stackoverflow_0074674924_c++_function_struct_typedef_visual_c++.txt |
Q:
Justify-content: space-between doesn't appear to be working in a flexbox
I have tried changing a bunch of things in the CSS with classes and id's, but nothing seems to make it so that both images will be on the far side of the screen (logo on the far left and profile on the far right).
I tried lots of different things like text-align and different justify-content values, but nothing appears to work.
Here is the code:
.top-nav {
display: flex;
position: absolute;
background-color: blue;
opacity: 0.5;
height: 10%;
top: 0;
width: 100%;
left: 0;
padding: 0;
border: 0;
margin: 0;
list-style: none;
}
.top-nav div {
display: flex;
justify-content: space-between;
height: 100%;
margin: 0px;
padding: 0;
border: 0;
margin: 0;
}
<div class="top-nav">
<div style="flex-grow: 1"><img src="/textures/logo.svg"></div>
<div style="flex-grow: 1"><img src="/textures/profile.svg"></div>
</div>
A:
justify-content currently has no visible effect because it is set on the inner divs, each of which wraps only a single image, so there is nothing inside them to space out.
If you want the divs with the images separated, put justify-content: space-between on the div that contains them, i.e. the top-nav div (see the sketch below).
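In terms of the posted CSS, that boils down to something like this (a sketch of just the relevant rules):
.top-nav {
  display: flex;
  justify-content: space-between; /* moved up to the container that holds both divs */
}

/* and drop flex-grow: 1 from the two child divs so they no longer stretch to fill the row */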
A:
The obvious answer is that you set the flex items (the .top-nav div elements, the parents of the <img> elements) to expand to fill the available space; this means the <div> elements fill that space, and the <img> elements are aligned via the default text-align for the language defined by your browser.
Instead you could remove the <div> elements, as they do little to help and bloat the HTML for no reason, and specify justify-content: space-between on the .top-nav element:
.top-nav {
display: flex;
background-color: blue;
justify-content: space-between;
}
<div class="top-nav">
<img src="https://placekitten.com/300">
<img src="https://via.placeholder.com/300">
</div>
Or you could retain the <div> elements and simply omit the flex-grow declaration:
.top-nav {
display: flex;
background-color: blue;
justify-content: space-between;
}
<div class="top-nav">
<div><img src="https://placekitten.com/300"></div>
<div><img src="https://via.placeholder.com/300"></div>
</div>
Or simply use text-align on those <div> elements:
.top-nav {
display: flex;
background-color: blue;
justify-content: space-between;
}
.top-nav div:first-child {
text-align: left;
}
.top-nav div:last-child {
text-align: right;
}
<div class="top-nav">
<div><img src="https://placekitten.com/300"></div>
<div><img src="https://via.placeholder.com/300"></div>
</div>
References:
display.
flex-grow.
justify-content.
text-align.
A:
Looking for a result similar to this?
.top-nav {
display: flex;
position: absolute;
background-color: blue;
opacity: 0.5;
height: 10%;
top: 0;
width: 100%;
left: 0;
padding: 0;
border: 0;
margin: 0;
list-style: none;
}
.top-nav .brand {
margin-right: auto; /* Push element to the left*/
}
.top-nav .profile {
margin-left: auto; /* Push element to the right */
text-align: right;
}
<div class="top-nav">
<div class="brand">
<img src="/textures/logo.svg">
</div>
<div class="profile">
<img src="/textures/profile.svg">
</div>
</div>
| Justify-content: space-between doesn't appear to be working in a flexbox | I have tried changing a bunch of things in the CSS with classes and id's, but nothing seems to make it so that both images will be on the far side of the screen (logo on the far left and profile on the far right).
Tried lots of different things like text-align and different justify-contents but nothing appears to work.
Here is the code:
.top-nav {
display: flex;
position: absolute;
background-color: blue;
opacity: 0.5;
height: 10%;
top: 0;
width: 100%;
left: 0;
padding: 0;
border: 0;
margin: 0;
list-style: none;
}
.top-nav div {
display: flex;
justify-content: space-between;
height: 100%;
margin: 0px;
padding: 0;
border: 0;
margin: 0;
}
<div class="top-nav">
<div style="flex-grow: 1"><img src="/textures/logo.svg"></div>
<div style="flex-grow: 1"><img src="/textures/profile.svg"></div>
</div>
| [
"justify-content right now does nothing, because it's set on a div that doesn't have display:flex on it.\nIf you want want the divs with images separeted, then put the justify-content:space-between on the div that has them i.e. the top-nav div.\n",
"The obvious answer is that you set the flex-items the top-nav div elements – the parents of the <img> elements – to expand to fill the availlable space; this means the <div> elements fill that space, and the <img> elements are aligned via the default text-align for the language defined by your browser.\nInstead you could either remove the <div> elements, as they do little to help and bloat the HTML for no reason, and specify justify-content: space-between on the .top-nav element:\n\n\n.top-nav {\n display: flex;\n background-color: blue;\n justify-content: space-between;\n}\n<div class=\"top-nav\">\n <img src=\"https://placekitten.com/300\">\n <img src=\"https://via.placeholder.com/300\">\n</div>\n\n\n\nOr, you could either retain the <div> elements and simply omit the flex-grow statement:\n\n\n.top-nav {\n display: flex;\n background-color: blue;\n justify-content: space-between;\n}\n<div class=\"top-nav\">\n <div><img src=\"https://placekitten.com/300\"></div>\n <div><img src=\"https://via.placeholder.com/300\"></div>\n</div>\n\n\n\nOr simply use text-align on those <div> elements:\n\n\n.top-nav {\n display: flex;\n background-color: blue;\n justify-content: space-between;\n}\n\n.top-nav div:first-child {\n text-align: left;\n}\n\n.top-nav div:last-child {\n text-align: right;\n}\n<div class=\"top-nav\">\n <div><img src=\"https://placekitten.com/300\"></div>\n <div><img src=\"https://via.placeholder.com/300\"></div>\n</div>\n\n\n\nReferences:\n\ndisplay.\nflex-grow.\njustify-content.\ntext-align.\n\n",
"Looking for a result similar to this?\n\n\n.top-nav {\n display: flex;\n position: absolute;\n background-color: blue;\n opacity: 0.5;\n height: 10%;\n top: 0;\n width: 100%;\n left: 0;\n padding: 0;\n border: 0;\n margin: 0;\n list-style: none;\n}\n\n.top-nav .brand {\n margin-right: auto; /* Push element to the left*/\n}\n\n.top-nav .profile {\n margin-left: auto; /* Push element to the right */\n text-align: right;\n}\n<div class=\"top-nav\">\n <div class=\"brand\">\n <img src=\"/textures/logo.svg\">\n </div>\n <div class=\"profile\">\n <img src=\"/textures/profile.svg\">\n </div>\n</div>\n\n\n\n"
] | [
0,
0,
0
] | [] | [] | [
"css",
"html",
"web"
] | stackoverflow_0074675163_css_html_web.txt |
Q:
Still getting data after expire SANCTUM
Hi, I edited config/sanctum.php and set the expiration value to 1 minute, to test whether I would get an error after expiration. When I sent a request from my SPA app after 3 minutes, I was still able to get the details. I expected it to throw an error, a 422 status code, or whatever status code is thrown by Laravel.
'expiration' => 1
<?php
return [
/*
|--------------------------------------------------------------------------
| Stateful Domains
|--------------------------------------------------------------------------
|
| Requests from the following domains / hosts will receive stateful API
| authentication cookies. Typically, these should include your local
| and production domains which access your API via a frontend SPA.
|
*/
'stateful' => explode(',', env('SANCTUM_STATEFUL_DOMAINS', 'localhost,127.0.0.1,127.0.0.1:8000,::1')),
/*
|--------------------------------------------------------------------------
| Expiration Minutes
|--------------------------------------------------------------------------
|
| This value controls the number of minutes until an issued token will be
| considered expired. If this value is null, personal access tokens do
| not expire. This won't tweak the lifetime of first-party sessions.
|
*/
'expiration' => 1,
/*
|--------------------------------------------------------------------------
| Sanctum Middleware
|--------------------------------------------------------------------------
|
| When authenticating your first-party SPA with Sanctum you may need to
| customize some of the middleware Sanctum uses while processing the
| request. You may change the middleware listed below as required.
|
*/
'middleware' => [
'verify_csrf_token' => App\Http\Middleware\VerifyCsrfToken::class,
'encrypt_cookies' => App\Http\Middleware\EncryptCookies::class,
],
];
Did I miss something in the configuration or elsewhere?
Thank you in advance.
A:
Clear your cached configuration and test it again:
php artisan config:cache
then
php artisan cache:clear
I think it can resolve your problem.
A:
For your question, you can try changing the value of expiration:
'expiration' => 120
Or make
'expiration' => null
If this value is null, personal access tokens do not expire.
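Note that the comment in config/sanctum.php quoted in the question already says this value "won't tweak the lifetime of first-party sessions", so if your SPA logs in via the cookie/session flow, the session lifetime is the setting that matters rather than sanctum's expiration. A sketch of the usual defaults in config/session.php (an assumption about your setup, not a confirmed fix):
// config/session.php
'lifetime' => env('SESSION_LIFETIME', 120),   // minutes the session stays valid
'expire_on_close' => false,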
| Still getting data after expire SANCTUM | Hi I edit the config/sanctum.php , I add a value for 1 min, to test if I will get an error after expiration. when I send a request after 3 mins from my SPA app, I was able to get the details. I expected that it will throw an error or some status code 422 or whatever the status code is being thrown by the laravel.
'expiration' => 1
<?php
return [
/*
|--------------------------------------------------------------------------
| Stateful Domains
|--------------------------------------------------------------------------
|
| Requests from the following domains / hosts will receive stateful API
| authentication cookies. Typically, these should include your local
| and production domains which access your API via a frontend SPA.
|
*/
'stateful' => explode(',', env('SANCTUM_STATEFUL_DOMAINS', 'localhost,127.0.0.1,127.0.0.1:8000,::1')),
/*
|--------------------------------------------------------------------------
| Expiration Minutes
|--------------------------------------------------------------------------
|
| This value controls the number of minutes until an issued token will be
| considered expired. If this value is null, personal access tokens do
| not expire. This won't tweak the lifetime of first-party sessions.
|
*/
'expiration' => 1,
/*
|--------------------------------------------------------------------------
| Sanctum Middleware
|--------------------------------------------------------------------------
|
| When authenticating your first-party SPA with Sanctum you may need to
| customize some of the middleware Sanctum uses while processing the
| request. You may change the middleware listed below as required.
|
*/
'middleware' => [
'verify_csrf_token' => App\Http\Middleware\VerifyCsrfToken::class,
'encrypt_cookies' => App\Http\Middleware\EncryptCookies::class,
],
];
did I miss something in the configuration or elsewhere?
Thank you in advance.
| [
"clean your cache configuration and test it again\nphp artisan config:cache\n\nthen\nphp artisan cache:clear\n\ni think it can resolve your problem .\n",
"For your question you can try to change the values of expiration\n 'expiration' => 120\n\nOr make\n 'expiration' => null\n\nIf this value is null, personal access tokens do| not expire.\n"
] | [
0,
0
] | [] | [] | [
"laravel_8",
"laravel_sanctum",
"php"
] | stackoverflow_0073768848_laravel_8_laravel_sanctum_php.txt |
Q:
Rendering a popup after clicking an other component
I have a component named Card, and a const boolean variable named auth that reflects the user's authorization status. If the user is not logged in, auth is false.
When auth is false and the user clicks on one of the Card components, I want to show them a popup saying they should register if they want to open a Card.
Here is my Card component:
import * as React from 'react';
import {useState} from 'react';
import './card.css';
import HeartButton from "./HeartButton";
import { StyledEngineProvider } from '@mui/material/styles';
import Modal from './Pop';
const Card = ({ profile, auth }) => {
const handleClick = () => {
// RENDER COMPONENT MODAL HERE
// MODAL SHOULD BE RENDERED IF AND ONLY IF AUTH IS FALSE
}
return (
<div className='influencer-profile-card' onClick={handleClick}>
<div>
<HeartButton/>
</div>
<div className="profile_picture">
<img src={profile.picture !== 'N/A' ? profile.picture : 'https://via.placeholder.com/400'} alt={profile.name} />
</div>
<div className="info">
<h3>{profile.name}</h3>
<h4>{profile.ID + " • " + profile.category}</h4>
</div>
</div>
)
}
export default Card
and here is the Modal:
import * as React from 'react';
import Backdrop from '@mui/material/Backdrop';
import Box from '@mui/material/Box';
import Modal from '@mui/material/Modal';
import Fade from '@mui/material/Fade';
import Button from '@mui/material/Button';
import Typography from '@mui/material/Typography';
import "./popup.css";
function TransitionsModal(auth) {
const [open, setOpen] = React.useState(auth);
const handleClose = () => setOpen(false);
return (
<div>
<Modal
aria-labelledby="transition-modal-title"
aria-describedby="transition-modal-description"
open={open}
onClose={handleClose}
closeAfterTransition
BackdropComponent={Backdrop}
BackdropProps={{
timeout: 500
}}>
<Fade in={open}>
<div className="popup-container" id='blur'>
<h1>Profil detaylarını görebilmek için lütfen giriş yapınız.</h1 >
<div className='popup-buttons'>
<a className='giris-yap-wrapper' href="/signIn">
<input
type="submit"
value="Giriş Yap"
className="popup-input-login"
onclick="togglePopup()"/>
</a>
<input type="submit" value="Vazgeç" className="popup-input" onClick={handleClose}/>
</div>
</div>
</Fade>
</Modal>
</div>
);
}
export default TransitionsModal;
I'm new to React and I can't figure out how to code this. Please help me.
A:
Here's how you can modify the Card component to show the modal when the user clicks on the card and auth is false:
const Card = ({ profile, auth }) => {
// Use a state variable to track whether the modal should be displayed or not
const [showModal, setShowModal] = useState(false);
const handleClick = () => {
// If the user is not authorized, show the modal
if (!auth) {
setShowModal(true);
}
}
return (
<div className='influencer-profile-card' onClick={handleClick}>
{/* Show the modal if showModal is true */}
{showModal && <Modal auth={auth} onClose={() => setShowModal(false)} />}
<div>
<HeartButton/>
</div>
<div className="profile_picture">
<img src={profile.picture !== 'N/A' ? profile.picture : 'https://via.placeholder.com/400'} alt={profile.name} />
</div>
<div className="info">
<h3>{profile.name}</h3>
<h4>{profile.ID + " • " + profile.category}</h4>
</div>
</div>
)
}
In the Modal component, you can accept the auth value as a prop and use it to decide whether to show the "Giriş Yap" or "Vazgeç" buttons. You can also add a callback prop that the Card component can use to close the modal.
Here's how you can modify the Modal component to accept the auth prop and the onClose callback prop:
function TransitionsModal({ auth, onClose }) {
// Use a state variable to track whether the modal is open or not
const [open, setOpen] = React.useState(auth);
const handleClose = () => {
setOpen(false);
onClose();
};
return (
<div>
<Modal
aria-labelledby="transition-modal-title"
aria-describedby="transition-modal-description"
open={open}
onClose={handleClose}
closeAfterTransition
BackdropComponent={Backdrop}
BackdropProps={{
timeout: 500
}}
>
<Fade in={open}>
<div className="popup-container" id="blur">
<h1>Profil detaylarını görebilmek için lütfen giriş yapınız.</h1>
<div className="popup-buttons">
{/* Show the "Giriş Yap" button if the user is not authorized */}
{!auth && (
<a className="giris-yap-wrapper" href="/signIn">
<input
type="submit"
value="Giriş Yap"
className="popup-input-login"
onclick="togglePopup()"
/>
</a>
)}
<input
type="submit"
value="Vazgeç"
className="popup-input"
onClick={handleClose}
/>
</div>
</div>
</Fade>
</Modal>
</div>
);
}
This code shows the "Giriş Yap" button if the user is not authorized (i.e. if auth is false). When the user clicks on this button, it will navigate to the /signIn page.
Next, the code shows the "Vazgeç" button that the user can use to close the modal. When the user clicks on this button, it will call the handleClose function, which will close the modal and call the onClose callback prop that was passed to the Modal component.
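For completeness, here is a minimal usage sketch (the ProfileList parent, the profiles array and the isLoggedIn flag are assumptions for illustration) showing how the auth flag would typically be passed down to each Card:
function ProfileList({ profiles, isLoggedIn }) {
  return (
    <div className="profile-list">
      {/* Every card receives the shared auth flag; clicks while logged out open the modal */}
      {profiles.map((profile) => (
        <Card key={profile.ID} profile={profile} auth={isLoggedIn} />
      ))}
    </div>
  );
}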
| Rendering a popup after clicking another component | I have a component named Card, and a const boolean variable named auth that reflects the user's authorization status. If the user is not logged in, auth is false.
When auth is false and the user clicks on one of the Card components, I want to show him a popup saying that he needs to register before he can click on a Card.
Here is my Card component:
import * as React from 'react';
import {useState} from 'react';
import './card.css';
import HeartButton from "./HeartButton";
import { StyledEngineProvider } from '@mui/material/styles';
import Modal from './Pop';
const Card = ({ profile, auth }) => {
const handleClick = () => {
// RENDER COMPONENT MODAL HERE
// MODAL SHOULD BE RENDERED IF AND ONLY IF AUTH IS FALSE
}
return (
<div className='influencer-profile-card' onClick={handleClick}>
<div>
<HeartButton/>
</div>
<div className="profile_picture">
<img src={profile.picture !== 'N/A' ? profile.picture : 'https://via.placeholder.com/400'} alt={profile.name} />
</div>
<div className="info">
<h3>{profile.name}</h3>
<h4>{profile.ID + " • " + profile.category}</h4>
</div>
</div>
)
}
export default Card
and here is the Modal:
import * as React from 'react';
import Backdrop from '@mui/material/Backdrop';
import Box from '@mui/material/Box';
import Modal from '@mui/material/Modal';
import Fade from '@mui/material/Fade';
import Button from '@mui/material/Button';
import Typography from '@mui/material/Typography';
import "./popup.css";
function TransitionsModal(auth) {
const [open, setOpen] = React.useState(auth);
const handleClose = () => setOpen(false);
return (
<div>
<Modal
aria-labelledby="transition-modal-title"
aria-describedby="transition-modal-description"
open={open}
onClose={handleClose}
closeAfterTransition
BackdropComponent={Backdrop}
BackdropProps={{
timeout: 500
}}>
<Fade in={open}>
<div className="popup-container" id='blur'>
<h1>Profil detaylarını görebilmek için lütfen giriş yapınız.</h1 >
<div className='popup-buttons'>
<a className='giris-yap-wrapper' href="/signIn">
<input
type="submit"
value="Giriş Yap"
className="popup-input-login"
onclick="togglePopup()"/>
</a>
<input type="submit" value="Vazgeç" className="popup-input" onClick={handleClose}/>
</div>
</div>
</Fade>
</Modal>
</div>
);
}
export default TransitionsModal;
I'm new to React and I can't figure out how to code this. Please help me.
| [
"Here's how you can modify the Card component to show the modal when the user clicks on the card and auth is false:\nconst Card = ({ profile, auth }) => {\n // Use a state variable to track whether the modal should be displayed or not\n const [showModal, setShowModal] = useState(false);\n\n const handleClick = () => {\n // If the user is not authorized, show the modal\n if (!auth) {\n setShowModal(true);\n }\n }\n\n return (\n <div className='influencer-profile-card' onClick={handleClick}>\n\n {/* Show the modal if showModal is true */}\n {showModal && <Modal auth={auth} onClose={() => setShowModal(false)} />}\n\n <div>\n <HeartButton/>\n </div>\n\n <div className=\"profile_picture\">\n <img src={profile.picture !== 'N/A' ? profile.picture : 'https://via.placeholder.com/400'} alt={profile.name} />\n </div>\n\n <div className=\"info\">\n <h3>{profile.name}</h3>\n <h4>{profile.ID + \" • \" + profile.category}</h4>\n </div>\n\n </div>\n )\n}\n\nIn the Modal component, you can accept the auth value as a prop and use it to decide whether to show the \"Giriş Yap\" or \"Vazgeç\" buttons. You can also add a callback prop that the Card component can use to close the modal.\nHere's how you can modify the Modal component to accept the auth prop and the onClose callback prop:\nfunction TransitionsModal({ auth, onClose }) {\n // Use a state variable to track whether the modal is open or not\n const [open, setOpen] = React.useState(auth);\n const handleClose = () => {\n setOpen(false);\n onClose();\n };\n\n return (\n <div>\n <Modal\n aria-labelledby=\"transition-modal-title\"\n aria-describedby=\"transition-modal-description\"\n open={open}\n onClose={handleClose}\n closeAfterTransition\n BackdropComponent={Backdrop}\n BackdropProps={{\n timeout: 500\n }}\n >\n <Fade in={open}>\n <div className=\"popup-container\" id=\"blur\">\n <h1>Profil detaylarını görebilmek için lütfen giriş yapınız.</h1>\n <div className=\"popup-buttons\">\n {/* Show the \"Giriş Yap\" button if the user is not authorized */}\n {!auth && (\n <a className=\"giris-yap-wrapper\" href=\"/signIn\">\n <input\n type=\"submit\"\n value=\"Giriş Yap\"\n className=\"popup-input-login\"\n onclick=\"togglePopup()\"\n />\n </a>\n )}\n <input\n type=\"submit\"\n value=\"Vazgeç\"\n className=\"popup-input\"\n onClick={handleClose}\n />\n </div>\n </div>\n </Fade>\n </Modal>\n </div>\n );\n}\n\nThis code shows the \"Giriş Yap\" button if the user is not authorized (i.e. if auth is false). When the user clicks on this button, it will navigate to the /signIn page.\nNext, the code shows the \"Vazgeç\" button that the user can use to close the modal. When the user clicks on this button, it will call the handleClose function, which will close the modal and call the onClose callback prop that was passed to the Modal component.\n"
] | [
1
] | [] | [] | [
"javascript",
"react_hooks",
"reactjs"
] | stackoverflow_0074675361_javascript_react_hooks_reactjs.txt |
Q:
Flutter [in_app_purchase] Get all plans inside subscription
I'm using the in_app_purchase package, but I can only get one plan inside each subscription.
I have 3 subscriptions:
Basic subscription
Premium subscription
Enterprise subscription
And inside each subscription, I want to have 2 plans:
Month plan
Year plan
I always get the plan that has the "backward compatibility"("This will be the baseline returned by the deprecated Google Play Billing Library method querySkuDetailsAsync()") enabled.
Is there any way to get all plans, or do I have to create 6 subscriptions with only 1 plan in each one?
Edit:
import 'dart:async';
import 'dart:io';
import 'package:flutter/material.dart';
import 'package:flutter_i18n/flutter_i18n.dart';
import 'package:in_app_purchase/in_app_purchase.dart';
import 'package:in_app_purchase_storekit/in_app_purchase_storekit.dart';
import 'package:in_app_purchase_storekit/store_kit_wrappers.dart';
import 'package:motorline_home/widgets/materials/appbar/appbar_title_widget.dart';
import 'package:motorline_home/widgets/materials/pop_button_widget.dart';
import 'package:rxdart/subjects.dart';
class SubscriptionPage extends StatefulWidget {
const SubscriptionPage({
Key? key,
}) : super(key: key);
@override
State<SubscriptionPage> createState() => _SubscriptionPageState();
}
class _SubscriptionPageState extends State<SubscriptionPage> {
// In app subscriptions
InAppPurchase _inAppPurchase = InAppPurchase.instance;
late StreamSubscription<List<PurchaseDetails>> _inAppPurchaseSubscription;
StreamController<List<ProductDetails>> _streamGooglePlaySubscriptions =
BehaviorSubject();
final List<String> _subscriptionsIDs = [
"basic",
"premium",
"enterprise",
];
@override
void initState() {
super.initState();
// In app purchase subscription
_inAppPurchaseSubscription =
_inAppPurchase.purchaseStream.listen((purchaseDetailsList) {
_listenToPurchaseUpdated(purchaseDetailsList);
}, onDone: () {
print("In app purchase onDone");
_inAppPurchaseSubscription.cancel();
}, onError: (error) {
print("In app purchase error: ${error.toString()}");
// handle error here.
_inAppPurchaseSubscription.cancel();
});
// Initialize in app purchase
_initializeInAppPurchase();
}
@override
void dispose() {
if (Platform.isIOS) {
final InAppPurchaseStoreKitPlatformAddition iosPlatformAddition =
_inAppPurchase
.getPlatformAddition<InAppPurchaseStoreKitPlatformAddition>();
iosPlatformAddition.setDelegate(null);
}
// Cancel in app purchase listener
_inAppPurchaseSubscription.cancel();
super.dispose();
}
@override
Widget build(BuildContext context) {
return Scaffold(
appBar: AppBar(
title: AppBarTitleWidget(
title: FlutterI18n.translate(context, "subscriptions"),
),
leading: PopButtonWidget(),
),
// Body
body: Container(),
);
}
void _initializeInAppPurchase() async {
print("Initializing in app purchase");
bool available = await _inAppPurchase.isAvailable();
print("In app purchase initialized: $available");
if (available) {
if (Platform.isIOS) {
final InAppPurchaseStoreKitPlatformAddition iosPlatformAddition =
_inAppPurchase
.getPlatformAddition<InAppPurchaseStoreKitPlatformAddition>();
await iosPlatformAddition.setDelegate(ExamplePaymentQueueDelegate());
}
// Get subscriptions
List<ProductDetails> subscriptions = await _getSubscriptions(
productIds:
_subscriptionsIDs.toSet(),
);
// Sort by price
subscriptions.sort((a, b) => a.rawPrice.compareTo(b.rawPrice));
// Add subscriptions to stream
_streamGooglePlaySubscriptions.add(subscriptions);
// DEBUG: Print subscriptions
print("In app purchase subscription subscriptions: ${subscriptions}");
for (var subscription in subscriptions) {
print("In app purchase plan: ${subscription.id}: ${subscription.rawPrice}");
print("In app purchase description: ${subscription.description}");
// HOW GET ALL PLANS IN EACH SUBSCRIPTION ID?
}
await InAppPurchase.instance.restorePurchases();
}
}
// In app purchase updates
void _listenToPurchaseUpdated(List<PurchaseDetails> purchaseDetailsList) {
purchaseDetailsList.forEach((PurchaseDetails purchaseDetails) async {
// If purchase is pending
if (purchaseDetails.status == PurchaseStatus.pending) {
print("In app purchase pending...");
// Show pending ui
} else {
if (purchaseDetails.status == PurchaseStatus.canceled) {
print("In app purchase cancelled");
}
// If purchase failed
if (purchaseDetails.status == PurchaseStatus.error) {
print("In app purchase error");
// Show error
} else if (purchaseDetails.status == PurchaseStatus.purchased ||
purchaseDetails.status == PurchaseStatus.restored) {
print("In app purchase restored or purchased");
}
if (purchaseDetails.pendingCompletePurchase) {
debugPrint("In app purchase complete purchased");
debugPrint(
"In app purchase purchase id : ${purchaseDetails.purchaseID}");
debugPrint(
"In app purchase server data : ${purchaseDetails.verificationData.serverVerificationData}");
debugPrint(
"In app purchase local data : ${purchaseDetails.verificationData.localVerificationData}");
// Verify purchase on backend
try {
// VALIDADE PURCHASE IN BACKEND
} catch (error) {
debugPrint("In app purchase error: ${error.toString()}");
}
}
}
});
}
// Get subscription
Future<List<ProductDetails>> _getSubscriptions(
{required Set<String> productIds}) async {
ProductDetailsResponse response =
await _inAppPurchase.queryProductDetails(productIds);
return response.productDetails;
}
}
/// Example implementation of the
/// [`SKPaymentQueueDelegate`](https://developer.apple.com/documentation/storekit/skpaymentqueuedelegate?language=objc).
///
/// The payment queue delegate can be implementated to provide information
/// needed to complete transactions.
class ExamplePaymentQueueDelegate implements SKPaymentQueueDelegateWrapper {
@override
bool shouldContinueTransaction(
SKPaymentTransactionWrapper transaction, SKStorefrontWrapper storefront) {
return true;
}
@override
bool shouldShowPriceConsent() {
return false;
}
}
A:
What you would like to do is not possible. You need to create a separate subscription for each plan; you can't have a single Premium subscription with both a yearly and a monthly plan.
A:
This is not possible at the moment since it's not supported by official plugin (and we don't know when it will be):
https://github.com/flutter/flutter/issues/110909
As they mentioned, solution is one plan per subscription with the extra logic necessary in your app (e.g. to detect if subscription with ID PLUS-year is an upgrade of PLUS-month)
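A minimal sketch of that workaround, assuming the same imports as the question's code; the six product IDs below are placeholders, with one Play subscription defined per billing period:
// One Play subscription per plan; these IDs are hypothetical placeholders.
const Set<String> kPlanIds = {
  'basic_monthly', 'basic_yearly',
  'premium_monthly', 'premium_yearly',
  'enterprise_monthly', 'enterprise_yearly',
};

Future<List<ProductDetails>> loadAllPlans(InAppPurchase iap) async {
  final ProductDetailsResponse response =
      await iap.queryProductDetails(kPlanIds);
  if (response.notFoundIDs.isNotEmpty) {
    // Any ID not configured in the Play Console ends up here.
    debugPrint('Missing products: ${response.notFoundIDs}');
  }
  return response.productDetails;
}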
| Flutter [in_app_purchase] Get all plans inside subscription | I'm using the in_app_purchase package, but I can only get one plan inside each subscription.
I have 3 subscriptions:
Basic subscription
Premium subscription
Enterprise subscription
And inside each subscription, I want to have 2 plans:
Month plan
Year plan
I always get the plan that has the "backward compatibility"("This will be the baseline returned by the deprecated Google Play Billing Library method querySkuDetailsAsync()") enabled.
Is there any way to get all plans, or do I have to create 6 subscriptions with only 1 plan in each one?
Edit:
import 'dart:async';
import 'dart:io';
import 'package:flutter/material.dart';
import 'package:flutter_i18n/flutter_i18n.dart';
import 'package:in_app_purchase/in_app_purchase.dart';
import 'package:in_app_purchase_storekit/in_app_purchase_storekit.dart';
import 'package:in_app_purchase_storekit/store_kit_wrappers.dart';
import 'package:motorline_home/widgets/materials/appbar/appbar_title_widget.dart';
import 'package:motorline_home/widgets/materials/pop_button_widget.dart';
import 'package:rxdart/subjects.dart';
class SubscriptionPage extends StatefulWidget {
const SubscriptionPage({
Key? key,
}) : super(key: key);
@override
State<SubscriptionPage> createState() => _SubscriptionPageState();
}
class _SubscriptionPageState extends State<SubscriptionPage> {
// In app subscriptions
InAppPurchase _inAppPurchase = InAppPurchase.instance;
late StreamSubscription<List<PurchaseDetails>> _inAppPurchaseSubscription;
StreamController<List<ProductDetails>> _streamGooglePlaySubscriptions =
BehaviorSubject();
final List<String> _subscriptionsIDs = [
"basic",
"premium",
"enterprise",
];
@override
void initState() {
super.initState();
// In app purchase subscription
_inAppPurchaseSubscription =
_inAppPurchase.purchaseStream.listen((purchaseDetailsList) {
_listenToPurchaseUpdated(purchaseDetailsList);
}, onDone: () {
print("In app purchase onDone");
_inAppPurchaseSubscription.cancel();
}, onError: (error) {
print("In app purchase error: ${error.toString()}");
// handle error here.
_inAppPurchaseSubscription.cancel();
});
// Initialize in app purchase
_initializeInAppPurchase();
}
@override
void dispose() {
if (Platform.isIOS) {
final InAppPurchaseStoreKitPlatformAddition iosPlatformAddition =
_inAppPurchase
.getPlatformAddition<InAppPurchaseStoreKitPlatformAddition>();
iosPlatformAddition.setDelegate(null);
}
// Cancel in app purchase listener
_inAppPurchaseSubscription.cancel();
super.dispose();
}
@override
Widget build(BuildContext context) {
return Scaffold(
appBar: AppBar(
title: AppBarTitleWidget(
title: FlutterI18n.translate(context, "subscriptions"),
),
leading: PopButtonWidget(),
),
// Body
body: Container(),
);
}
void _initializeInAppPurchase() async {
print("Initializing in app purchase");
bool available = await _inAppPurchase.isAvailable();
print("In app purchase initialized: $available");
if (available) {
if (Platform.isIOS) {
final InAppPurchaseStoreKitPlatformAddition iosPlatformAddition =
_inAppPurchase
.getPlatformAddition<InAppPurchaseStoreKitPlatformAddition>();
await iosPlatformAddition.setDelegate(ExamplePaymentQueueDelegate());
}
// Get subscriptions
List<ProductDetails> subscriptions = await _getSubscriptions(
productIds:
_subscriptionsIDs.toSet(),
);
// Sort by price
subscriptions.sort((a, b) => a.rawPrice.compareTo(b.rawPrice));
// Add subscriptions to stream
_streamGooglePlaySubscriptions.add(subscriptions);
// DEBUG: Print subscriptions
print("In app purchase subscription subscriptions: ${subscriptions}");
for (var subscription in subscriptions) {
print("In app purchase plan: ${subscription.id}: ${subscription.rawPrice}");
print("In app purchase description: ${subscription.description}");
// HOW GET ALL PLANS IN EACH SUBSCRIPTION ID?
}
await InAppPurchase.instance.restorePurchases();
}
}
// In app purchase updates
void _listenToPurchaseUpdated(List<PurchaseDetails> purchaseDetailsList) {
purchaseDetailsList.forEach((PurchaseDetails purchaseDetails) async {
// If purchase is pending
if (purchaseDetails.status == PurchaseStatus.pending) {
print("In app purchase pending...");
// Show pending ui
} else {
if (purchaseDetails.status == PurchaseStatus.canceled) {
print("In app purchase cancelled");
}
// If purchase failed
if (purchaseDetails.status == PurchaseStatus.error) {
print("In app purchase error");
// Show error
} else if (purchaseDetails.status == PurchaseStatus.purchased ||
purchaseDetails.status == PurchaseStatus.restored) {
print("In app purchase restored or purchased");
}
if (purchaseDetails.pendingCompletePurchase) {
debugPrint("In app purchase complete purchased");
debugPrint(
"In app purchase purchase id : ${purchaseDetails.purchaseID}");
debugPrint(
"In app purchase server data : ${purchaseDetails.verificationData.serverVerificationData}");
debugPrint(
"In app purchase local data : ${purchaseDetails.verificationData.localVerificationData}");
// Verify purchase on backend
try {
// VALIDADE PURCHASE IN BACKEND
} catch (error) {
debugPrint("In app purchase error: ${error.toString()}");
}
}
}
});
}
// Get subscription
Future<List<ProductDetails>> _getSubscriptions(
{required Set<String> productIds}) async {
ProductDetailsResponse response =
await _inAppPurchase.queryProductDetails(productIds);
return response.productDetails;
}
}
/// Example implementation of the
/// [`SKPaymentQueueDelegate`](https://developer.apple.com/documentation/storekit/skpaymentqueuedelegate?language=objc).
///
/// The payment queue delegate can be implementated to provide information
/// needed to complete transactions.
class ExamplePaymentQueueDelegate implements SKPaymentQueueDelegateWrapper {
@override
bool shouldContinueTransaction(
SKPaymentTransactionWrapper transaction, SKStorefrontWrapper storefront) {
return true;
}
@override
bool shouldShowPriceConsent() {
return false;
}
}
| [
"What you like to do is not possible. You need to create for each subscription a new plan, you can't say a Premium Subscription does have a yearly and a monthly plan.\n",
"This is not possible at the moment since it's not supported by official plugin (and we don't know when it will be):\nhttps://github.com/flutter/flutter/issues/110909\nAs they mentioned, solution is one plan per subscription with the extra logic necessary in your app (e.g. to detect if subscription with ID PLUS-year is an upgrade of PLUS-month)\n"
] | [
0,
0
] | [] | [] | [
"flutter",
"in_app_purchase"
] | stackoverflow_0072561135_flutter_in_app_purchase.txt |
Q:
what's the meaning of "&::" in cpp?
In cpp,
I know & is used to get the address
and :: is used to get the member of class,
but, what's the usage of &:: ?
Here is the example of the function connect of qt5:
QtWidgetsApplication2::QtWidgetsApplication2(QWidget *parent)
: QMainWindow(parent)
{
ui.setupUi(this);
connect(ui.pushButton_select, &QPushButton::clicked,this,&::QtWidgetsApplication2::select_file);
}
void QtWidgetsApplication2::select_file()
{
file_path = QFileDialog::getOpenFileName(NULL, QStringLiteral("选择文件"), "D:", QStringLiteral("音频文件(*wav)"));
}
what's the meaning of &:: in this line :
connect(ui.pushButton_select, &QPushButton::clicked,this,&::QtWidgetsApplication2::select_file);
why not &QtWidgetsApplication2::select_file?
A:
:: is used to get the member of class
While this is a correct assumption, it is not limited to just classes. This operator gives access to a scope, and when it has no identifier on its left side it explicitly tells the compiler to use the global scope to resolve the given name. If we look at the operator precedence order, the scope resolution operator :: is at the top of the list, so it is resolved first, then the address-of operator &.
So if you were to put in parentheses, your code would read as & ( (::QtWidgetsApplication2) ::select_file).
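A small plain-C++ sketch of that reading (no Qt involved; the Widget class is made up): with or without the leading ::, the same pointer to member function is produced as long as no other Widget shadows the global one.
struct Widget {
    void onClick() {}
};

int main() {
    void (Widget::*a)() = &Widget::onClick;    // Widget found by normal name lookup
    void (Widget::*b)() = &::Widget::onClick;  // Widget looked up from the global scope
    return (a == b) ? 0 : 1;                   // both name the same member function
}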
A:
what's the meaning of &:: in this line :
It essentially means that the topmost scope(occuring at the leftmost side) is the global scope. That is, we start by looking into the global namespace first.
why not &QtWidgetsApplication2::select_file?
This would mean that we start by looking into QtWidetsApplication2 first instead of the global namespace.
Consider the following contrived example:
namespace P
{
struct C
{
constexpr static int i = 0;
};
}
struct C
{
constexpr static int i = 0;
};
//-------------vvv-------->uses global class C
const int *j = &::C::i;
//-------------vv-------->uses class C in namespace P
const int *k = &P::C::i;
Demo
A:
The & operator is used in C++ to take the address of a variable or function. In this case, &QPushButton::clicked is taking the address of the clicked member function of the QPushButton class.
The :: operator is used in C++ to specify a member of a class, namespace, or enumeration. In this case, QPushButton::clicked is specifying the clicked member of the QPushButton class.
The combination &:: therefore takes the address of a name that is looked up starting from the global namespace. In this case, &::QtWidgetsApplication2::select_file is taking the address of the select_file member function of the QtWidgetsApplication2 class (a pointer to member function), which is what the connect function needs so it can call the slot when the signal is emitted.
It's also worth noting that you could write the same code using the std::bind function instead, like this:
connect(ui.pushButton_select, &QPushButton::clicked, std::bind(&QtWidgetsApplication2::select_file, this));
This has the same effect as the original code, but it uses the std::bind function to bind the this pointer to the select_file member function, allowing it to be called as a regular function rather than using the &:: syntax.
A:
In this case, the "&" symbol is used to take the address of a function, rather than a variable. This is often used in C++ when connecting signals to slots in the Qt framework. In this specific example, the "&QPushButton::clicked" part of the code is taking the address of the "clicked" signal in the QPushButton class, and the "&::QtWidgetsApplication2::select_file" part is taking the address of the "select_file" member function in the "QtWidgetsApplication2" class.
| what's the meaning of "&::" in cpp? | In cpp,
I know & is used to get the address
and :: is used to get the member of class,
but, what's the usage of &:: ?
Here is the example of the function connect of qt5:
QtWidgetsApplication2::QtWidgetsApplication2(QWidget *parent)
: QMainWindow(parent)
{
ui.setupUi(this);
connect(ui.pushButton_select, &QPushButton::clicked,this,&::QtWidgetsApplication2::select_file);
}
void QtWidgetsApplication2::select_file()
{
file_path = QFileDialog::getOpenFileName(NULL, QStringLiteral("选择文件"), "D:", QStringLiteral("音频文件(*wav)"));
}
what's the meaning of &:: in this line :
connect(ui.pushButton_select, &QPushButton::clicked,this,&::QtWidgetsApplication2::select_file);
why not &QtWidgetsApplication2::select_file?
| [
"\n:: is used to get the member of class\n\nWhile this is a correct assumption it is not limited to just class. This operator give access to a scope. And when it has no identifiers on its left side it explicitly tells compiler to use the global scope to resolve the given name. If we look at the operator precedence order the scope resolution operator :: is on the top of the list, so it will be resolved first, then the address of operator &.\nSo if you would put parenthesis, your code should read as & ( (::QtWidgetsApplication2) ::select_file).\n",
"\nwhat's the meaning of &:: in this line :\n\nIt essentially means that the topmost scope(occuring at the leftmost side) is the global scope. That is, we start by looking into the global namespace first.\n\n\nwhy not &QtWidgetsApplication2::select_file?\n\nThis would mean that we start by looking into QtWidetsApplication2 first instead of the global namespace.\n\nConsider the following contrived example:\nnamespace P\n{\n struct C\n {\n constexpr static int i = 0;\n };\n}\n\nstruct C\n{\n constexpr static int i = 0;\n};\n//-------------vvv-------->uses global class C\nconst int *j = &::C::i;\n//-------------vv-------->uses class C in namespace P\nconst int *k = &P::C::i;\n\nDemo\n",
"The & operator is used in C++ to take the address of a variable or function. In this case, &QPushButton::clicked is taking the address of the clicked member function of the QPushButton class.\nThe :: operator is used in C++ to specify a member of a class, namespace, or enumeration. In this case, QPushButton::clicked is specifying the clicked member of the QPushButton class.\nThe combination &:: is used to take the address of a global function or static member function. In this case, &::QtWidgetsApplication2::select_file is taking the address of the select_file static member function of the QtWidgetsApplication2 class. This is necessary because the connect function expects the address of the function to be called when the signal is emitted.\nIt's also worth noting that you could write the same code using the std::bind function instead, like this:\nconnect(ui.pushButton_select, &QPushButton::clicked, std::bind(&QtWidgetsApplication2::select_file, this));\n\nThis has the same effect as the original code, but it uses the std::bind function to bind the this pointer to the select_file member function, allowing it to be called as a regular function rather than using the &:: syntax.\n",
"In this case, the \"&\" symbol is used to take the address of a function, rather than a variable. This is often used in C++ when connecting signals to slots in the Qt framework. In this specific example, the \"&QPushButton::clicked\" part of the code is taking the address of the \"clicked\" signal in the QPushButton class, and the \"&::QtWidgetsApplication2::select_file\" part is taking the address of the \"select_file\" member function in the \"QtWidgetsApplication2\" class.\n"
] | [
2,
1,
0,
0
] | [] | [] | [
"c++",
"operator_keyword",
"qt",
"qt5"
] | stackoverflow_0074674894_c++_operator_keyword_qt_qt5.txt |
Q:
Preferred method to store PHP arrays (json_encode vs serialize)
I need to store a multi-dimensional associative array of data in a flat file for caching purposes. I might occasionally come across the need to convert it to JSON for use in my web app but the vast majority of the time I will be using the array directly in PHP.
Would it be more efficient to store the array as JSON or as a PHP serialized array in this text file? I've looked around and it seems that in the newest versions of PHP (5.3), json_decode is actually faster than unserialize.
I'm currently leaning towards storing the array as JSON as I feel it's easier for a human to read if necessary, it can be used in both PHP and JavaScript with very little effort, and from what I've read, it might even be faster to decode (not sure about encoding, though).
Does anyone know of any pitfalls? Anyone have good benchmarks to show the performance benefits of either method?
A:
Depends on your priorities.
If performance is your absolute driving characteristic, then by all means use the fastest one. Just make sure you have a full understanding of the differences before you make a choice
Unlike serialize() you need to add extra parameter to keep UTF-8 characters untouched: json_encode($array, JSON_UNESCAPED_UNICODE) (otherwise it converts UTF-8 characters to Unicode escape sequences).
JSON will have no memory of what the object's original class was (they are always restored as instances of stdClass).
You can't leverage __sleep() and __wakeup() with JSON
By default, only public properties are serialized with JSON. (in PHP>=5.4 you can implement JsonSerializable to change this behavior).
JSON is more portable
And there's probably a few other differences I can't think of at the moment.
A simple speed test to compare the two
<?php
ini_set('display_errors', 1);
error_reporting(E_ALL);
// Make a big, honkin test array
// You may need to adjust this depth to avoid memory limit errors
$testArray = fillArray(0, 5);
// Time json encoding
$start = microtime(true);
json_encode($testArray);
$jsonTime = microtime(true) - $start;
echo "JSON encoded in $jsonTime seconds\n";
// Time serialization
$start = microtime(true);
serialize($testArray);
$serializeTime = microtime(true) - $start;
echo "PHP serialized in $serializeTime seconds\n";
// Compare them
if ($jsonTime < $serializeTime) {
printf("json_encode() was roughly %01.2f%% faster than serialize()\n", ($serializeTime / $jsonTime - 1) * 100);
}
else if ($serializeTime < $jsonTime ) {
printf("serialize() was roughly %01.2f%% faster than json_encode()\n", ($jsonTime / $serializeTime - 1) * 100);
} else {
echo "Impossible!\n";
}
function fillArray( $depth, $max ) {
static $seed;
if (is_null($seed)) {
$seed = array('a', 2, 'c', 4, 'e', 6, 'g', 8, 'i', 10);
}
if ($depth < $max) {
$node = array();
foreach ($seed as $key) {
$node[$key] = fillArray($depth + 1, $max);
}
return $node;
}
return 'empty';
}
A:
JSON is simpler and faster than PHP's serialization format and should be used unless:
You're storing deeply nested arrays:
json_decode(): "This function will return false if the JSON encoded data is deeper than 127 elements."
You're storing objects that need to be unserialized as the correct class
You're interacting with old PHP versions that don't support json_decode
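A tiny sketch of the second point (the CacheEntry class is just an example): a serialize() round trip keeps the original class, while a JSON round trip gives you stdClass.
<?php
class CacheEntry {
    public $key = 'homepage';
    public $ttl = 300;
}

$entry = new CacheEntry();

var_dump(get_class(unserialize(serialize($entry)))); // string(10) "CacheEntry"
var_dump(get_class(json_decode(json_encode($entry)))); // string(8) "stdClass"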
A:
I've written a blogpost about this subject: "Cache a large array: JSON, serialize or var_export?". In this post it is shown that serialize is the best choice for small to large sized arrays. For very large arrays (> 70MB) JSON is the better choice.
A:
You might also be interested in https://github.com/phadej/igbinary - which provides a different serialization 'engine' for PHP.
My random/arbitrary 'performance' figures, using PHP 5.3.5 on a 64bit platform show :
JSON :
JSON encoded in 2.180496931076 seconds
JSON decoded in 9.8368630409241 seconds
serialized "String" size : 13993
Native PHP :
PHP serialized in 2.9125759601593 seconds
PHP unserialized in 6.4348418712616 seconds
serialized "String" size : 20769
Igbinary :
WIN igbinary serialized in 1.6099879741669 seconds
WIN igbinrary unserialized in 4.7737920284271 seconds
WIN serialized "String" Size : 4467
So, it's quicker to igbinary_serialize() and igbinary_unserialize() and uses less disk space.
I used the fillArray(0, 3) code as above, but made the array keys longer strings.
igbinary can store the same data types as PHP's native serialize can (So no problem with objects etc) and you can tell PHP5.3 to use it for session handling if you so wish.
See also http://ilia.ws/files/zendcon_2010_hidden_features.pdf - specifically slides 14/15/16
A:
I just tested serialize and JSON encode and decode, plus the size the stored string will take.
JSON encoded in 0.067085981369 seconds. Size (1277772)
PHP serialized in 0.12110209465 seconds. Size (1955548)
JSON decode in 0.22470498085 seconds
PHP serialized in 0.211947917938 seconds
json_encode() was roughly 80.52% faster than serialize()
unserialize() was roughly 6.02% faster than json_decode()
JSON string was roughly 53.04% smaller than Serialized string
We can conclude that JSON encodes faster and results in a smaller string, but unserialize is faster at decoding the string.
A:
If you are caching information that you will ultimately want to "include" at a later point in time, you may want to try using var_export. That way you only take the hit in the "serialize" and not in the "unserialize".
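A minimal sketch of that approach (the file path and data are placeholders): the cache file is plain PHP, so reading it back is just an include with no unserialize()/json_decode() step at all.
<?php
$data = array('mode' => 'production', 'ttl' => 3600);

// Write once: a PHP file that simply returns the array.
file_put_contents('/tmp/config.cache.php', '<?php return ' . var_export($data, true) . ';');

// Read later: include() hands the array straight back (and can be served from the opcode cache).
$cached = include '/tmp/config.cache.php';
var_dump($cached === $data); // bool(true)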
A:
I augmented the test to include unserialization performance. Here are the numbers I got.
Serialize
JSON encoded in 2.5738489627838 seconds
PHP serialized in 5.2861361503601 seconds
Serialize: json_encode() was roughly 105.38% faster than serialize()
Unserialize
JSON decode in 10.915472984314 seconds
PHP unserialized in 7.6223039627075 seconds
Unserialize: unserialize() was roughly 43.20% faster than json_decode()
So JSON seems to be faster for encoding but slower at decoding. It could depend upon your application and what you expect to do most.
A:
Really nice topic, and after reading the few answers, I want to share my experiments on the subject.
I have a use case where a "huge" table needs to be queried almost every time I talk to the database (don't ask why, it's just a fact). The database caching system isn't appropriate as it won't cache the different requests, so I thought about PHP caching systems.
I tried APCu but it didn't fit the needs; memory isn't reliable enough in this case. The next step was to cache into a file with serialization.
Table has 14355 entries with 18 columns, those are my tests and stats on reading the serialized cache:
JSON:
As you all said, the major inconvenience with json_encode/json_decode is that it transforms everything into a stdClass instance (or object). If you need to loop over it, transforming it into an array is what you'll probably do, and yes, that increases the transformation time.
average time: 780.2 ms; memory use: 41.5MB; cache file size: 3.8MB
Msgpack
@hutch mentions msgpack. Pretty website. Let's give it a try shall we?
average time: 497 ms; memory use: 32MB; cache file size: 2.8MB
That's better, but it requires a new extension; compiling sometimes scares people off...
IgBinary
@GingerDog mentions igbinary. Note that I've set igbinary.compact_strings=Off because I care more about read performance than file size.
average time: 411.4 ms; memory use: 36.75MB; cache file size: 3.3MB
Better than msg pack. Still, this one requires compiling too.
serialize/unserialize
average time: 477.2 ms; memory use: 36.25MB; cache file size: 5.9MB
Better performance than JSON; the bigger the array is, the slower json_decode is, but you already knew that.
Those external extensions narrow down the file size and seem great on paper. Numbers don't lie*. What's the point of compiling an extension if you get almost the same results that you'd have with a standard PHP function?
We can also deduce that depending on your needs, you will choose something different than someone else:
IgBinary is really nice and performs better than MsgPack
Msgpack is better at compressing your data (note that I didn't try the igbinary
compact.string option).
Don't want to compile? Use standards.
That's it, another serialization methods comparison to help you choose the one!
*Tested with PHPUnit 3.7.31, PHP 5.5.10, decoding only, with a standard hard drive and an old dual-core CPU; average numbers over 10 runs of the same use case. Your stats might be different.
A:
Seems like serialize is the one I'm going to use for 2 reasons:
Someone pointed out that unserialize is faster than json_decode and a 'read' case sounds more probable than a 'write' case.
I've had trouble with json_encode when having strings with invalid UTF-8 characters. When that happens the string ends up being empty causing loss of information.
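A small illustration of that pitfall (the exact behaviour depends on the PHP version; on PHP >= 5.5 json_encode() returns false rather than silently emptying the string, while serialize() round-trips the bytes untouched):
<?php
$value = array('name' => "bad\xB1\x31bytes"); // \xB1 is not valid UTF-8

var_dump(json_encode($value));       // bool(false) on PHP >= 5.5
var_dump(json_last_error_msg());     // "Malformed UTF-8 characters, possibly incorrectly encoded"

var_dump(unserialize(serialize($value)) === $value); // bool(true)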
A:
I know this is late, but the answers are pretty old; I thought my benchmarks might help, as I have just tested on PHP 7.4.
Serialize/unserialize is much faster than JSON, takes less memory and space, and wins outright in PHP 7.4, though I am not sure my test is the most efficient or the best.
I basically created a PHP file which returns an array, which I then encoded, serialized, decoded and unserialized.
$array = include __DIR__.'/../tests/data/dao/testfiles/testArray.php';
//JSON ENCODE
$json_encode_memory_start = memory_get_usage();
$json_encode_time_start = microtime(true);
for ($i=0; $i < 20000; $i++) {
$encoded = json_encode($array);
}
$json_encode_time_end = microtime(true);
$json_encode_memory_end = memory_get_usage();
$json_encode_time = $json_encode_time_end - $json_encode_time_start;
$json_encode_memory =
$json_encode_memory_end - $json_encode_memory_start;
//SERIALIZE
$serialize_memory_start = memory_get_usage();
$serialize_time_start = microtime(true);
for ($i=0; $i < 20000; $i++) {
$serialized = serialize($array);
}
$serialize_time_end = microtime(true);
$serialize_memory_end = memory_get_usage();
$serialize_time = $serialize_time_end - $serialize_time_start;
$serialize_memory = $serialize_memory_end - $serialize_memory_start;
//Write to file time:
$fpc_memory_start = memory_get_usage();
$fpc_time_start = microtime(true);
for ($i=0; $i < 20000; $i++) {
$fpc_bytes =
file_put_contents(
__DIR__.'/../tests/data/dao/testOneBigFile',
'<?php return '.var_export($array,true).' ?>;'
);
}
$fpc_time_end = microtime(true);
$fpc_memory_end = memory_get_usage();
$fpc_time = $fpc_time_end - $fpc_time_start;
$fpc_memory = $fpc_memory_end - $fpc_memory_start;
//JSON DECODE
$json_decode_memory_start = memory_get_usage();
$json_decode_time_start = microtime(true);
for ($i=0; $i < 20000; $i++) {
$decoded = json_decode($encoded);
}
$json_decode_time_end = microtime(true);
$json_decode_memory_end = memory_get_usage();
$json_decode_time = $json_decode_time_end - $json_decode_time_start;
$json_decode_memory =
$json_decode_memory_end - $json_decode_memory_start;
//UNSERIALIZE
$unserialize_memory_start = memory_get_usage();
$unserialize_time_start = microtime(true);
for ($i=0; $i < 20000; $i++) {
$unserialized = unserialize($serialized);
}
$unserialize_time_end = microtime(true);
$unserialize_memory_end = memory_get_usage();
$unserialize_time = $unserialize_time_end - $unserialize_time_start;
$unserialize_memory =
$unserialize_memory_end - $unserialize_memory_start;
//GET FROM VAR EXPORT:
$var_export_memory_start = memory_get_usage();
$var_export_time_start = microtime(true);
for ($i=0; $i < 20000; $i++) {
$array = include __DIR__.'/../tests/data/dao/testOneBigFile';
}
$var_export_time_end = microtime(true);
$var_export_memory_end = memory_get_usage();
$var_export_time = $var_export_time_end - $var_export_time_start;
$var_export_memory = $var_export_memory_end - $var_export_memory_start;
Results:
Var Export length: 11447
Serialized length: 11541
Json encoded length: 11895
file put contents Bytes: 11464
Json Encode Time: 1.9197590351105
Serialize Time: 0.160325050354
FPC Time: 6.2793469429016
Json Encode Memory: 12288
Serialize Memory: 12288
FPC Memory: 0
JSON Decoded time: 1.7493588924408
UnSerialize Time: 0.19309520721436
Var Export and Include: 3.1974139213562
JSON Decoded memory: 16384
UnSerialize Memory: 14360
Var Export and Include: 192
A:
I've tested this very thoroughly on a fairly complex, mildly nested multi-hash with all kinds of data in it (string, NULL, integers), and serialize/unserialize ended up much faster than json_encode/json_decode.
The only advantage JSON had in my tests was its smaller 'packed' size.
These are done under PHP 5.3.3, let me know if you want more details.
Here are the test results, followed by the code used to produce them. I can't provide the test data since it would reveal information that I can't let out into the wild.
JSON encoded in 2.23700618744 seconds
PHP serialized in 1.3434419632 seconds
JSON decoded in 4.0405561924 seconds
PHP unserialized in 1.39393305779 seconds
serialized size : 14549
json_encode size : 11520
serialize() was roughly 66.51% faster than json_encode()
unserialize() was roughly 189.87% faster than json_decode()
json_encode() string was roughly 26.29% smaller than serialize()
// Time json encoding
$start = microtime( true );
for($i = 0; $i < 10000; $i++) {
json_encode( $test );
}
$jsonTime = microtime( true ) - $start;
echo "JSON encoded in $jsonTime seconds<br>";
// Time serialization
$start = microtime( true );
for($i = 0; $i < 10000; $i++) {
serialize( $test );
}
$serializeTime = microtime( true ) - $start;
echo "PHP serialized in $serializeTime seconds<br>";
// Time json decoding
$test2 = json_encode( $test );
$start = microtime( true );
for($i = 0; $i < 10000; $i++) {
json_decode( $test2 );
}
$jsonDecodeTime = microtime( true ) - $start;
echo "JSON decoded in $jsonDecodeTime seconds<br>";
// Time deserialization
$test2 = serialize( $test );
$start = microtime( true );
for($i = 0; $i < 10000; $i++) {
unserialize( $test2 );
}
$unserializeTime = microtime( true ) - $start;
echo "PHP unserialized in $unserializeTime seconds<br>";
$jsonSize = strlen(json_encode( $test ));
$phpSize = strlen(serialize( $test ));
echo "<p>serialized size : " . strlen(serialize( $test )) . "<br>";
echo "json_encode size : " . strlen(json_encode( $test )) . "<br></p>";
// Compare them
if ( $jsonTime < $serializeTime )
{
echo "json_encode() was roughly " . number_format( ($serializeTime / $jsonTime - 1 ) * 100, 2 ) . "% faster than serialize()";
}
else if ( $serializeTime < $jsonTime )
{
echo "serialize() was roughly " . number_format( ($jsonTime / $serializeTime - 1 ) * 100, 2 ) . "% faster than json_encode()";
} else {
echo 'Unpossible!';
}
echo '<BR>';
// Compare them
if ( $jsonDecodeTime < $unserializeTime )
{
echo "json_decode() was roughly " . number_format( ($unserializeTime / $jsonDecodeTime - 1 ) * 100, 2 ) . "% faster than unserialize()";
}
else if ( $unserializeTime < $jsonDecodeTime )
{
echo "unserialize() was roughly " . number_format( ($jsonDecodeTime / $unserializeTime - 1 ) * 100, 2 ) . "% faster than json_decode()";
} else {
echo 'Unpossible!';
}
echo '<BR>';
// Compare them
if ( $jsonSize < $phpSize )
{
echo "json_encode() string was roughly " . number_format( ($phpSize / $jsonSize - 1 ) * 100, 2 ) . "% smaller than serialize()";
}
else if ( $phpSize < $jsonSize )
{
echo "serialize() string was roughly " . number_format( ($jsonSize / $phpSize - 1 ) * 100, 2 ) . "% smaller than json_encode()";
} else {
echo 'Unpossible!';
}
A:
I made a small benchmark as well. My results were the same, but I need the decode performance. There I noticed, as a few people above also said, that unserialize is faster than json_decode: unserialize takes roughly 60-70% of the json_decode time. So the conclusion is fairly simple:
When you need performance in encoding, use json_encode; when you need performance in decoding, use unserialize. Because you cannot merge the two functions, you have to make a choice based on where you need more performance.
My benchmark in pseudo:
Define array $arr with a few random keys and values
for x < 100; x++; serialize and json_encode a array_rand of $arr
for y < 1000; y++; json_decode the json encoded string - calc time
for y < 1000; y++; unserialize the serialized string - calc time
echo the result which was faster
On average: unserialize won 96 times versus 4 times for json_decode, averaging roughly 1.5 ms versus 2.5 ms.
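A runnable sketch of that pseudo-benchmark (the array contents are made up; the loop counts follow the description above):
<?php
$arr = array();
for ($i = 0; $i < 100; $i++) {
    $arr['key_' . $i] = str_repeat('x', mt_rand(5, 40));
}

$json   = json_encode($arr);
$serial = serialize($arr);

$start = microtime(true);
for ($y = 0; $y < 1000; $y++) { json_decode($json, true); }
$jsonDecodeTime = microtime(true) - $start;

$start = microtime(true);
for ($y = 0; $y < 1000; $y++) { unserialize($serial); }
$unserializeTime = microtime(true) - $start;

printf("json_decode: %.4fs, unserialize: %.4fs\n", $jsonDecodeTime, $unserializeTime);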
A:
Check out the results here (sorry for the hack putting the PHP code in the JS code box):
http://jsfiddle.net/newms87/h3b0a0ha/embedded/result/
RESULTS: serialize() and unserialize() are both significantly faster in PHP 5.4 on arrays of varying size.
I made a test script on real-world data comparing json_encode vs serialize and json_decode vs unserialize. The test was run on the caching system of an in-production e-commerce site. It simply takes the data already in the cache, tests the time to encode/decode (or serialize/unserialize) all of it, and puts the results in an easy-to-read table.
I ran this on PHP 5.4 shared hosting server.
The results were very conclusive: for these small to large data sets, serialize and unserialize were the clear winners. In particular for my use case, json_decode and unserialize are the most important for the caching system. Unserialize was almost a universal winner here; it was typically 2 to 4 times (sometimes 6 or 7 times) as fast as json_decode.
It is interesting to note the difference in results from @peter-bailey.
Here is the PHP code used to generate the results:
<?php
ini_set('display_errors', 1);
error_reporting(E_ALL);
function _count_depth($array)
{
$count = 0;
$max_depth = 0;
foreach ($array as $a) {
if (is_array($a)) {
list($cnt, $depth) = _count_depth($a);
$count += $cnt;
$max_depth = max($max_depth, $depth);
} else {
$count++;
}
}
return array(
$count,
$max_depth + 1,
);
}
function run_test($file)
{
$memory = memory_get_usage();
$test_array = unserialize(file_get_contents($file));
$memory = round((memory_get_usage() - $memory) / 1024, 2);
if (empty($test_array) || !is_array($test_array)) {
return;
}
list($count, $depth) = _count_depth($test_array);
//JSON encode test
$start = microtime(true);
$json_encoded = json_encode($test_array);
$json_encode_time = microtime(true) - $start;
//JSON decode test
$start = microtime(true);
json_decode($json_encoded);
$json_decode_time = microtime(true) - $start;
//serialize test
$start = microtime(true);
$serialized = serialize($test_array);
$serialize_time = microtime(true) - $start;
//unserialize test
$start = microtime(true);
unserialize($serialized);
$unserialize_time = microtime(true) - $start;
return array(
'Name' => basename($file),
'json_encode() Time (s)' => $json_encode_time,
'json_decode() Time (s)' => $json_decode_time,
'serialize() Time (s)' => $serialize_time,
'unserialize() Time (s)' => $unserialize_time,
'Elements' => $count,
'Memory (KB)' => $memory,
'Max Depth' => $depth,
'json_encode() Win' => ($json_encode_time > 0 && $json_encode_time < $serialize_time) ? number_format(($serialize_time / $json_encode_time - 1) * 100, 2) : '',
'serialize() Win' => ($serialize_time > 0 && $serialize_time < $json_encode_time) ? number_format(($json_encode_time / $serialize_time - 1) * 100, 2) : '',
'json_decode() Win' => ($json_decode_time > 0 && $json_decode_time < $serialize_time) ? number_format(($serialize_time / $json_decode_time - 1) * 100, 2) : '',
'unserialize() Win' => ($unserialize_time > 0 && $unserialize_time < $json_decode_time) ? number_format(($json_decode_time / $unserialize_time - 1) * 100, 2) : '',
);
}
$files = glob(dirname(__FILE__) . '/system/cache/*');
$data = array();
foreach ($files as $file) {
if (is_file($file)) {
$result = run_test($file);
if ($result) {
$data[] = $result;
}
}
}
uasort($data, function ($a, $b) {
return $a['Memory (KB)'] < $b['Memory (KB)'];
});
$fields = array_keys($data[0]);
?>
<table>
<thead>
<tr>
<?php foreach ($fields as $f) { ?>
<td style="text-align: center; border:1px solid black;padding: 4px 8px;font-weight:bold;font-size:1.1em"><?= $f; ?></td>
<?php } ?>
</tr>
</thead>
<tbody>
<?php foreach ($data as $d) { ?>
<tr>
<?php foreach ($d as $key => $value) { ?>
<?php $is_win = strpos($key, 'Win'); ?>
<?php $color = ($is_win && $value) ? 'color: green;font-weight:bold;' : ''; ?>
<td style="text-align: center; vertical-align: middle; padding: 3px 6px; border: 1px solid gray; <?= $color; ?>"><?= $value . (($is_win && $value) ? '%' : ''); ?></td>
<?php } ?>
</tr>
<?php } ?>
</tbody>
</table>
A:
First, I changed the script to do some more benchmarking (and also do 1000 runs instead of just 1):
<?php
ini_set('display_errors', 1);
error_reporting(E_ALL);
// Make a big, honkin test array
// You may need to adjust this depth to avoid memory limit errors
$testArray = fillArray(0, 5);
$totalJsonTime = 0;
$totalSerializeTime = 0;
$totalJsonWins = 0;
for ($i = 0; $i < 1000; $i++) {
// Time json encoding
$start = microtime(true);
$json = json_encode($testArray);
$jsonTime = microtime(true) - $start;
$totalJsonTime += $jsonTime;
// Time serialization
$start = microtime(true);
$serial = serialize($testArray);
$serializeTime = microtime(true) - $start;
$totalSerializeTime += $serializeTime;
if ($jsonTime < $serializeTime) {
$totalJsonWins++;
}
}
$totalSerializeWins = 1000 - $totalJsonWins;
// Compare them
if ($totalJsonTime < $totalSerializeTime) {
printf("json_encode() (wins: $totalJsonWins) was roughly %01.2f%% faster than serialize()\n", ($totalSerializeTime / $totalJsonTime - 1) * 100);
} else {
printf("serialize() (wins: $totalSerializeWins) was roughly %01.2f%% faster than json_encode()\n", ($totalJsonTime / $totalSerializeTime - 1) * 100);
}
$totalJsonTime = 0;
$totalJson2Time = 0;
$totalSerializeTime = 0;
$totalJsonWins = 0;
for ($i = 0; $i < 1000; $i++) {
// Time json decoding
$start = microtime(true);
$orig = json_decode($json, true);
$jsonTime = microtime(true) - $start;
$totalJsonTime += $jsonTime;
$start = microtime(true);
$origObj = json_decode($json);
$jsonTime2 = microtime(true) - $start;
$totalJson2Time += $jsonTime2;
// Time serialization
$start = microtime(true);
$unserial = unserialize($serial);
$serializeTime = microtime(true) - $start;
$totalSerializeTime += $serializeTime;
if ($jsonTime < $serializeTime) {
$totalJsonWins++;
}
}
$totalSerializeWins = 1000 - $totalJsonWins;
// Compare them
if ($totalJsonTime < $totalSerializeTime) {
printf("json_decode() was roughly %01.2f%% faster than unserialize()\n", ($totalSerializeTime / $totalJsonTime - 1) * 100);
} else {
printf("unserialize() (wins: $totalSerializeWins) was roughly %01.2f%% faster than json_decode()\n", ($totalJsonTime / $totalSerializeTime - 1) * 100);
}
// Compare them
if ($totalJson2Time < $totalSerializeTime) {
printf("json_decode() was roughly %01.2f%% faster than unserialize()\n", ($totalSerializeTime / $totalJson2Time - 1) * 100);
} else {
printf("unserialize() (wins: $totalSerializeWins) was roughly %01.2f%% faster than array json_decode()\n", ($totalJson2Time / $totalSerializeTime - 1) * 100);
}
function fillArray( $depth, $max ) {
static $seed;
if (is_null($seed)) {
$seed = array('a', 2, 'c', 4, 'e', 6, 'g', 8, 'i', 10);
}
if ($depth < $max) {
$node = array();
foreach ($seed as $key) {
$node[$key] = fillArray($depth + 1, $max);
}
return $node;
}
return 'empty';
}
I used this build of PHP 7:
PHP 7.0.14 (cli) (built: Jan 18 2017 19:13:23) ( NTS ) Copyright (c)
1997-2016 The PHP Group Zend Engine v3.0.0, Copyright (c) 1998-2016
Zend Technologies
with Zend OPcache v7.0.14, Copyright (c) 1999-2016, by Zend Technologies
And my results were:
serialize() (wins: 999) was roughly 10.98% faster than json_encode()
unserialize() (wins: 987) was roughly 33.26% faster than json_decode()
unserialize() (wins: 987) was roughly 48.35% faster than array
json_decode()
So clearly, serialize/unserialize is the fastest method, while json_encode/decode is the most portable.
If you consider a scenario where you read/write serialized data 10x or more often than you need to send it to or receive it from a non-PHP system, you are STILL better off, in terms of time, using serialize/unserialize and only converting with json_encode or json_decode at the point where you exchange data with that system.
A:
Before you make your final decision, be aware that the JSON format is not safe for associative arrays - json_decode() will return them as objects instead:
$config = array(
'Frodo' => 'hobbit',
'Gimli' => 'dwarf',
'Gandalf' => 'wizard',
);
print_r($config);
print_r(json_decode(json_encode($config)));
Output is:
Array
(
[Frodo] => hobbit
[Gimli] => dwarf
[Gandalf] => wizard
)
stdClass Object
(
[Frodo] => hobbit
[Gimli] => dwarf
[Gandalf] => wizard
)
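Worth noting as a follow-up: json_decode() takes a second argument, and passing true restores plain associative arrays instead of stdClass objects.
<?php
$config = array(
    'Frodo'   => 'hobbit',
    'Gimli'   => 'dwarf',
    'Gandalf' => 'wizard',
);

// Second argument true => associative arrays instead of stdClass
$decoded = json_decode(json_encode($config), true);
var_dump($decoded === $config); // bool(true)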
A:
Just an FYI -- if you want to serialize your data to something easy to read and understand like JSON, but with more compression and higher performance, you should check out MessagePack.
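A rough sketch, assuming the PECL msgpack extension is installed and exposes msgpack_pack()/msgpack_unpack() (treat the exact API as an assumption):
<?php
$data = array('id' => 42, 'tags' => array('php', 'cache'));

$packed   = msgpack_pack($data);   // compact binary representation
$restored = msgpack_unpack($packed);

var_dump($restored === $data);                            // bool(true)
var_dump(strlen($packed) < strlen(json_encode($data)));   // usually bool(true)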
A:
Thanks for this benchmark code:
My results on the array I use for configuration are as follows:
JSON encoded in 0.0031511783599854 seconds
PHP serialized in 0.0037961006164551 seconds
json_encode() was roughly 20.47% faster than serialize()
JSON encoded in 0.0070841312408447 seconds
PHP serialized in 0.0035839080810547 seconds
unserialize() was roughly 97.66% faster than json_encode()
So - test it on your own data.
A:
JSON is better if you want to back up data and restore it on a different machine or via FTP.
For example, with serialize, if you store data on a Windows server, download it via FTP and restore it on a Linux one, it may no longer work due to character re-encoding: serialize stores the length of the strings, and in the Unicode/UTF-8 transcoding some one-byte character could become two bytes long, making the algorithm crash.
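A small illustration of that length dependence, assuming the mbstring extension (the transcoding step stands in for whatever the transfer or editor did to the stored file):
<?php
$stored = serialize('héllo');      // s:6:"héllo"; - the 6 is the UTF-8 byte length

// Re-encoding the stored string changes the byte length but not the recorded "6",
// so unserialize() rejects it.
$transcoded = mb_convert_encoding($stored, 'ISO-8859-1', 'UTF-8');
var_dump(@unserialize($transcoded)); // bool(false)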
A:
To sum up what people say here, json_decode/encode seems faster than serialize/unserialize, BUT
if you do a var_dump, the type of the serialized object is changed.
If for some reason you want to keep the type, go with serialize!
(try for example stdClass vs array)
serialize/unserialize:
Array cache:
array (size=2)
'a' => string '1' (length=1)
'b' => int 2
Object cache:
object(stdClass)[8]
public 'field1' => int 123
This cache:
object(Controller\Test)[8]
protected 'view' =>
json encode/decode
Array cache:
object(stdClass)[7]
public 'a' => string '1' (length=1)
public 'b' => int 2
Object cache:
object(stdClass)[8]
public 'field1' => int 123
This cache:
object(stdClass)[8]
As you can see, json_encode/decode converts everything to stdClass, which is not that good; object info is lost... So decide based on your needs, especially if it is not only arrays...
A:
Based on the information provided, it would be more efficient to store the array as JSON in this scenario. JSON is a widely-used and standardized format that is easy to read and can be easily converted to and from PHP arrays. Additionally, as you mentioned, json_decode is faster than unserialize in newer versions of PHP.
One potential pitfall of using JSON is that it is not as efficient in terms of space usage as serialized arrays. However, in most cases this should not be a significant concern.
As for benchmarks, it is difficult to provide specific numbers without conducting tests on your specific data and use case. However, in general, using JSON for storing and manipulating arrays in PHP is considered to be more efficient and easier to work with than serialized arrays.
| Preferred method to store PHP arrays (json_encode vs serialize) | I need to store a multi-dimensional associative array of data in a flat file for caching purposes. I might occasionally come across the need to convert it to JSON for use in my web app but the vast majority of the time I will be using the array directly in PHP.
Would it be more efficient to store the array as JSON or as a PHP serialized array in this text file? I've looked around and it seems that in the newest versions of PHP (5.3), json_decode is actually faster than unserialize.
I'm currently leaning towards storing the array as JSON as I feel its easier to read by a human if necessary, it can be used in both PHP and JavaScript with very little effort, and from what I've read, it might even be faster to decode (not sure about encoding, though).
Does anyone know of any pitfalls? Anyone have good benchmarks to show the performance benefits of either method?
| [
"Depends on your priorities.\nIf performance is your absolute driving characteristic, then by all means use the fastest one. Just make sure you have a full understanding of the differences before you make a choice\n\nUnlike serialize() you need to add extra parameter to keep UTF-8 characters untouched: json_encode($array, JSON_UNESCAPED_UNICODE) (otherwise it converts UTF-8 characters to Unicode escape sequences).\nJSON will have no memory of what the object's original class was (they are always restored as instances of stdClass).\nYou can't leverage __sleep() and __wakeup() with JSON\nBy default, only public properties are serialized with JSON. (in PHP>=5.4 you can implement JsonSerializable to change this behavior).\nJSON is more portable\n\nAnd there's probably a few other differences I can't think of at the moment.\nA simple speed test to compare the two\n<?php\n\nini_set('display_errors', 1);\nerror_reporting(E_ALL);\n\n// Make a big, honkin test array\n// You may need to adjust this depth to avoid memory limit errors\n$testArray = fillArray(0, 5);\n\n// Time json encoding\n$start = microtime(true);\njson_encode($testArray);\n$jsonTime = microtime(true) - $start;\necho \"JSON encoded in $jsonTime seconds\\n\";\n\n// Time serialization\n$start = microtime(true);\nserialize($testArray);\n$serializeTime = microtime(true) - $start;\necho \"PHP serialized in $serializeTime seconds\\n\";\n\n// Compare them\nif ($jsonTime < $serializeTime) {\n printf(\"json_encode() was roughly %01.2f%% faster than serialize()\\n\", ($serializeTime / $jsonTime - 1) * 100);\n}\nelse if ($serializeTime < $jsonTime ) {\n printf(\"serialize() was roughly %01.2f%% faster than json_encode()\\n\", ($jsonTime / $serializeTime - 1) * 100);\n} else {\n echo \"Impossible!\\n\";\n}\n\nfunction fillArray( $depth, $max ) {\n static $seed;\n if (is_null($seed)) {\n $seed = array('a', 2, 'c', 4, 'e', 6, 'g', 8, 'i', 10);\n }\n if ($depth < $max) {\n $node = array();\n foreach ($seed as $key) {\n $node[$key] = fillArray($depth + 1, $max);\n }\n return $node;\n }\n return 'empty';\n}\n\n",
"JSON is simpler and faster than PHP's serialization format and should be used unless:\n\nYou're storing deeply nested arrays:\njson_decode(): \"This function will return false if the JSON encoded data is deeper than 127 elements.\"\nYou're storing objects that need to be unserialized as the correct class\nYou're interacting with old PHP versions that don't support json_decode\n\n",
"I've written a blogpost about this subject: \"Cache a large array: JSON, serialize or var_export?\". In this post it is shown that serialize is the best choice for small to large sized arrays. For very large arrays (> 70MB) JSON is the better choice.\n",
"You might also be interested in https://github.com/phadej/igbinary - which provides a different serialization 'engine' for PHP.\nMy random/arbitrary 'performance' figures, using PHP 5.3.5 on a 64bit platform show :\nJSON :\n\nJSON encoded in 2.180496931076 seconds\nJSON decoded in 9.8368630409241 seconds\nserialized \"String\" size : 13993\n\nNative PHP :\n\nPHP serialized in 2.9125759601593 seconds\nPHP unserialized in 6.4348418712616 seconds\nserialized \"String\" size : 20769\n\nIgbinary :\n\nWIN igbinary serialized in 1.6099879741669 seconds\nWIN igbinrary unserialized in 4.7737920284271 seconds\nWIN serialized \"String\" Size : 4467\n\nSo, it's quicker to igbinary_serialize() and igbinary_unserialize() and uses less disk space.\nI used the fillArray(0, 3) code as above, but made the array keys longer strings.\nigbinary can store the same data types as PHP's native serialize can (So no problem with objects etc) and you can tell PHP5.3 to use it for session handling if you so wish.\nSee also http://ilia.ws/files/zendcon_2010_hidden_features.pdf - specifically slides 14/15/16\n",
"Y just tested serialized and json encode and decode, plus the size it will take the string stored.\nJSON encoded in 0.067085981369 seconds. Size (1277772)\nPHP serialized in 0.12110209465 seconds. Size (1955548)\nJSON decode in 0.22470498085 seconds\nPHP serialized in 0.211947917938 seconds\njson_encode() was roughly 80.52% faster than serialize()\nunserialize() was roughly 6.02% faster than json_decode()\nJSON string was roughly 53.04% smaller than Serialized string\n\nWe can conclude that JSON encodes faster and results a smaller string, but unserialize is faster to decode the string.\n",
"If you are caching information that you will ultimately want to \"include\" at a later point in time, you may want to try using var_export. That way you only take the hit in the \"serialize\" and not in the \"unserialize\".\n",
"I augmented the test to include unserialization performance. Here are the numbers I got.\nSerialize\n\nJSON encoded in 2.5738489627838 seconds\nPHP serialized in 5.2861361503601 seconds\nSerialize: json_encode() was roughly 105.38% faster than serialize()\n\n\nUnserialize\n\nJSON decode in 10.915472984314 seconds\nPHP unserialized in 7.6223039627075 seconds\nUnserialize: unserialize() was roughly 43.20% faster than json_decode() \n\nSo json seems to be faster for encoding but slow in decoding. So it could depend upon your application and what you expect to do the most.\n",
"Really nice topic and after reading the few answers, I want to share my experiments on the subject.\nI got a use case where some \"huge\" table needs to be queried almost every time I talk to the database (don't ask why, just a fact). The database caching system isn't appropriate as it'll not cache the different requests, so I though about php caching systems.\nI tried apcu but it didn't fit the needs, memory isn't enough reliable in this case. Next step was to cache into a file with serialization.\nTable has 14355 entries with 18 columns, those are my tests and stats on reading the serialized cache:\nJSON:\nAs you all said, the major inconvenience with json_encode/json_decode is that it transforms everything to an StdClass instance (or Object). If you need to loop it, transforming it to an array is what you'll probably do, and yes it's increasing the transformation time\n\naverage time: 780.2 ms; memory use: 41.5MB; cache file size: 3.8MB\n\nMsgpack\n@hutch mentions msgpack. Pretty website. Let's give it a try shall we?\n\naverage time: 497 ms; memory use: 32MB; cache file size: 2.8MB\n\nThat's better, but requires a new extension; compiling sometimes afraid people...\nIgBinary\n@GingerDog mentions igbinary. Note that I've set the igbinary.compact_strings=Offbecause I care more about reading performances than file size.\n\naverage time: 411.4 ms; memory use: 36.75MB; cache file size: 3.3MB\n\nBetter than msg pack. Still, this one requires compiling too.\nserialize/unserialize\n\naverage time: 477.2 ms; memory use: 36.25MB; cache file size: 5.9MB\n\nBetter performances than JSON, the bigger the array is, slower json_decode is, but you already new that.\nThose external extensions are narrowing down the file size and seems great on paper. Numbers don't lie*. What's the point of compiling an extension if you get almost the same results that you'd have with a standard PHP function?\nWe can also deduce that depending on your needs, you will choose something different than someone else:\n\nIgBinary is really nice and performs better than MsgPack\nMsgpack is better at compressing your datas (note that I didn't tried the igbinary\ncompact.string option).\nDon't want to compile? Use standards.\n\nThat's it, another serialization methods comparison to help you choose the one!\n*Tested with PHPUnit 3.7.31, php 5.5.10 - only decoding with a standard hardrive and old dual core CPU - average numbers on 10 same use case tests, your stats might be different\n",
"Seems like serialize is the one I'm going to use for 2 reasons:\n\nSomeone pointed out that unserialize is faster than json_decode and a 'read' case sounds more probable than a 'write' case.\nI've had trouble with json_encode when having strings with invalid UTF-8 characters. When that happens the string ends up being empty causing loss of information.\n\n",
"I know this is late but the answers are pretty old, I thought my benchmarks might help as I have just tested in PHP 7.4\nSerialize/Unserialize is much faster than JSON, takes less memory and space, and wins outright in PHP 7.4 but I am not sure my test is the most efficient or the best,\nI have basically created a PHP file which returns an array which I encoded, serialised, then decoded and unserialised.\n$array = include __DIR__.'/../tests/data/dao/testfiles/testArray.php';\n\n//JSON ENCODE\n$json_encode_memory_start = memory_get_usage();\n$json_encode_time_start = microtime(true);\n\nfor ($i=0; $i < 20000; $i++) { \n $encoded = json_encode($array);\n}\n\n$json_encode_time_end = microtime(true);\n$json_encode_memory_end = memory_get_usage();\n$json_encode_time = $json_encode_time_end - $json_encode_time_start;\n$json_encode_memory = \n$json_encode_memory_end - $json_encode_memory_start;\n\n\n//SERIALIZE\n$serialize_memory_start = memory_get_usage();\n$serialize_time_start = microtime(true);\n\nfor ($i=0; $i < 20000; $i++) { \n $serialized = serialize($array);\n}\n\n$serialize_time_end = microtime(true);\n$serialize_memory_end = memory_get_usage();\n$serialize_time = $serialize_time_end - $serialize_time_start;\n$serialize_memory = $serialize_memory_end - $serialize_memory_start;\n\n\n//Write to file time:\n$fpc_memory_start = memory_get_usage();\n$fpc_time_start = microtime(true);\n\nfor ($i=0; $i < 20000; $i++) { \n $fpc_bytes = \n file_put_contents(\n __DIR__.'/../tests/data/dao/testOneBigFile',\n '<?php return '.var_export($array,true).' ?>;'\n );\n}\n\n$fpc_time_end = microtime(true);\n$fpc_memory_end = memory_get_usage();\n$fpc_time = $fpc_time_end - $fpc_time_start;\n$fpc_memory = $fpc_memory_end - $fpc_memory_start;\n\n\n//JSON DECODE\n$json_decode_memory_start = memory_get_usage();\n$json_decode_time_start = microtime(true);\n\nfor ($i=0; $i < 20000; $i++) { \n $decoded = json_encode($encoded);\n}\n\n$json_decode_time_end = microtime(true);\n$json_decode_memory_end = memory_get_usage();\n$json_decode_time = $json_decode_time_end - $json_decode_time_start;\n$json_decode_memory = \n$json_decode_memory_end - $json_decode_memory_start;\n\n\n//UNSERIALIZE\n$unserialize_memory_start = memory_get_usage();\n$unserialize_time_start = microtime(true);\n\nfor ($i=0; $i < 20000; $i++) { \n $unserialized = unserialize($serialized);\n}\n\n$unserialize_time_end = microtime(true);\n$unserialize_memory_end = memory_get_usage();\n$unserialize_time = $unserialize_time_end - $unserialize_time_start;\n$unserialize_memory = \n$unserialize_memory_end - $unserialize_memory_start;\n\n\n//GET FROM VAR EXPORT:\n$var_export_memory_start = memory_get_usage();\n$var_export_time_start = microtime(true);\n\nfor ($i=0; $i < 20000; $i++) { \n $array = include __DIR__.'/../tests/data/dao/testOneBigFile';\n}\n\n$var_export_time_end = microtime(true);\n$var_export_memory_end = memory_get_usage();\n$var_export_time = $var_export_time_end - $var_export_time_start;\n$var_export_memory = $var_export_memory_end - $var_export_memory_start;\n\nResults:\nVar Export length: 11447\nSerialized length: 11541\nJson encoded length: 11895\nfile put contents Bytes: 11464\nJson Encode Time: 1.9197590351105\nSerialize Time: 0.160325050354\nFPC Time: 6.2793469429016\nJson Encode Memory: 12288\nSerialize Memory: 12288\nFPC Memory: 0\nJSON Decoded time: 1.7493588924408\nUnSerialize Time: 0.19309520721436\nVar Export and Include: 3.1974139213562\nJSON Decoded memory: 16384\nUnSerialize Memory: 14360\nVar Export and Include: 192\n",
"I've tested this very thoroughly on a fairly complex, mildly nested multi-hash with all kinds of data in it (string, NULL, integers), and serialize/unserialize ended up much faster than json_encode/json_decode.\nThe only advantage json have in my tests was it's smaller 'packed' size.\nThese are done under PHP 5.3.3, let me know if you want more details.\nHere are tests results then the code to produce them. I can't provide the test data since it'd reveal information that I can't let go out in the wild.\nJSON encoded in 2.23700618744 seconds\nPHP serialized in 1.3434419632 seconds\nJSON decoded in 4.0405561924 seconds\nPHP unserialized in 1.39393305779 seconds\n\nserialized size : 14549\njson_encode size : 11520\nserialize() was roughly 66.51% faster than json_encode()\nunserialize() was roughly 189.87% faster than json_decode()\njson_encode() string was roughly 26.29% smaller than serialize()\n\n// Time json encoding\n$start = microtime( true );\nfor($i = 0; $i < 10000; $i++) {\n json_encode( $test );\n}\n$jsonTime = microtime( true ) - $start;\necho \"JSON encoded in $jsonTime seconds<br>\";\n\n// Time serialization\n$start = microtime( true );\nfor($i = 0; $i < 10000; $i++) {\n serialize( $test );\n}\n$serializeTime = microtime( true ) - $start;\necho \"PHP serialized in $serializeTime seconds<br>\";\n\n// Time json decoding\n$test2 = json_encode( $test );\n$start = microtime( true );\nfor($i = 0; $i < 10000; $i++) {\n json_decode( $test2 );\n}\n$jsonDecodeTime = microtime( true ) - $start;\necho \"JSON decoded in $jsonDecodeTime seconds<br>\";\n\n// Time deserialization\n$test2 = serialize( $test );\n$start = microtime( true );\nfor($i = 0; $i < 10000; $i++) {\n unserialize( $test2 );\n}\n$unserializeTime = microtime( true ) - $start;\necho \"PHP unserialized in $unserializeTime seconds<br>\";\n\n$jsonSize = strlen(json_encode( $test ));\n$phpSize = strlen(serialize( $test ));\n\necho \"<p>serialized size : \" . strlen(serialize( $test )) . \"<br>\";\necho \"json_encode size : \" . strlen(json_encode( $test )) . \"<br></p>\";\n\n// Compare them\nif ( $jsonTime < $serializeTime )\n{\n echo \"json_encode() was roughly \" . number_format( ($serializeTime / $jsonTime - 1 ) * 100, 2 ) . \"% faster than serialize()\";\n}\nelse if ( $serializeTime < $jsonTime )\n{\n echo \"serialize() was roughly \" . number_format( ($jsonTime / $serializeTime - 1 ) * 100, 2 ) . \"% faster than json_encode()\";\n} else {\n echo 'Unpossible!';\n}\n echo '<BR>';\n\n// Compare them\nif ( $jsonDecodeTime < $unserializeTime )\n{\n echo \"json_decode() was roughly \" . number_format( ($unserializeTime / $jsonDecodeTime - 1 ) * 100, 2 ) . \"% faster than unserialize()\";\n}\nelse if ( $unserializeTime < $jsonDecodeTime )\n{\n echo \"unserialize() was roughly \" . number_format( ($jsonDecodeTime / $unserializeTime - 1 ) * 100, 2 ) . \"% faster than json_decode()\";\n} else {\n echo 'Unpossible!';\n}\n echo '<BR>';\n// Compare them\nif ( $jsonSize < $phpSize )\n{\n echo \"json_encode() string was roughly \" . number_format( ($phpSize / $jsonSize - 1 ) * 100, 2 ) . \"% smaller than serialize()\";\n}\nelse if ( $phpSize < $jsonSize )\n{\n echo \"serialize() string was roughly \" . number_format( ($jsonSize / $phpSize - 1 ) * 100, 2 ) . \"% smaller than json_encode()\";\n} else {\n echo 'Unpossible!';\n}\n\n",
"I made a small benchmark as well. My results were the same. But I need the decode performance. Where I noticed, like a few people above said as well, unserialize is faster than json_decode. unserialize takes roughly 60-70% of the json_decode time. So the conclusion is fairly simple:\nWhen you need performance in encoding, use json_encode, when you need performance when decoding, use unserialize. Because you can not merge the two functions you have to make a choise where you need more performance.\nMy benchmark in pseudo:\n\nDefine array $arr with a few random keys and values\nfor x < 100; x++; serialize and json_encode a array_rand of $arr\nfor y < 1000; y++; json_decode the json encoded string - calc time\nfor y < 1000; y++; unserialize the serialized string - calc time\necho the result which was faster\n\nOn avarage: unserialize won 96 times over 4 times the json_decode. With an avarage of roughly 1.5ms over 2.5ms.\n",
"Check out the results here (sorry for the hack putting the PHP code in the JS code box):\nhttp://jsfiddle.net/newms87/h3b0a0ha/embedded/result/\nRESULTS: serialize() and unserialize() are both significantly faster in PHP 5.4 on arrays of varying size.\nI made a test script on real world data for comparing json_encode vs serialize and json_decode vs unserialize. The test was run on the caching system of an in production e-commerce site. It simply takes the data already in the cache, and tests the times to encode / decode (or serialize / unserialize) all the data and I put it in an easy to see table.\nI ran this on PHP 5.4 shared hosting server.\nThe results were very conclusive that for these large to small data sets serialize and unserialize were the clear winners. In particular for my use case, the json_decode and unserialize are the most important for the caching system. Unserialize was almost an ubiquitous winner here. It was typically 2 to 4 times (sometimes 6 or 7 times) as fast as json_decode.\nIt is interesting to note the difference in results from @peter-bailey.\nHere is the PHP code used to generate the results:\n<?php\n\nini_set('display_errors', 1);\nerror_reporting(E_ALL);\n\nfunction _count_depth($array)\n{\n $count = 0;\n $max_depth = 0;\n foreach ($array as $a) {\n if (is_array($a)) {\n list($cnt, $depth) = _count_depth($a);\n $count += $cnt;\n $max_depth = max($max_depth, $depth);\n } else {\n $count++;\n }\n }\n\n return array(\n $count,\n $max_depth + 1,\n );\n}\n\nfunction run_test($file)\n{\n $memory = memory_get_usage();\n $test_array = unserialize(file_get_contents($file));\n $memory = round((memory_get_usage() - $memory) / 1024, 2);\n\n if (empty($test_array) || !is_array($test_array)) {\n return;\n }\n\n list($count, $depth) = _count_depth($test_array);\n\n //JSON encode test\n $start = microtime(true);\n $json_encoded = json_encode($test_array);\n $json_encode_time = microtime(true) - $start;\n\n //JSON decode test\n $start = microtime(true);\n json_decode($json_encoded);\n $json_decode_time = microtime(true) - $start;\n\n //serialize test\n $start = microtime(true);\n $serialized = serialize($test_array);\n $serialize_time = microtime(true) - $start;\n\n //unserialize test\n $start = microtime(true);\n unserialize($serialized);\n $unserialize_time = microtime(true) - $start;\n\n return array(\n 'Name' => basename($file),\n 'json_encode() Time (s)' => $json_encode_time,\n 'json_decode() Time (s)' => $json_decode_time,\n 'serialize() Time (s)' => $serialize_time,\n 'unserialize() Time (s)' => $unserialize_time,\n 'Elements' => $count,\n 'Memory (KB)' => $memory,\n 'Max Depth' => $depth,\n 'json_encode() Win' => ($json_encode_time > 0 && $json_encode_time < $serialize_time) ? number_format(($serialize_time / $json_encode_time - 1) * 100, 2) : '',\n 'serialize() Win' => ($serialize_time > 0 && $serialize_time < $json_encode_time) ? number_format(($json_encode_time / $serialize_time - 1) * 100, 2) : '',\n 'json_decode() Win' => ($json_decode_time > 0 && $json_decode_time < $serialize_time) ? number_format(($serialize_time / $json_decode_time - 1) * 100, 2) : '',\n 'unserialize() Win' => ($unserialize_time > 0 && $unserialize_time < $json_decode_time) ? number_format(($json_decode_time / $unserialize_time - 1) * 100, 2) : '',\n );\n}\n\n$files = glob(dirname(__FILE__) . 
'/system/cache/*');\n\n$data = array();\n\nforeach ($files as $file) {\n if (is_file($file)) {\n $result = run_test($file);\n\n if ($result) {\n $data[] = $result;\n }\n }\n}\n\nuasort($data, function ($a, $b) {\n return $a['Memory (KB)'] < $b['Memory (KB)'];\n});\n\n$fields = array_keys($data[0]);\n?>\n\n<table>\n <thead>\n <tr>\n <?php foreach ($fields as $f) { ?>\n <td style=\"text-align: center; border:1px solid black;padding: 4px 8px;font-weight:bold;font-size:1.1em\"><?= $f; ?></td>\n <?php } ?>\n </tr>\n </thead>\n\n <tbody>\n <?php foreach ($data as $d) { ?>\n <tr>\n <?php foreach ($d as $key => $value) { ?>\n <?php $is_win = strpos($key, 'Win'); ?>\n <?php $color = ($is_win && $value) ? 'color: green;font-weight:bold;' : ''; ?>\n <td style=\"text-align: center; vertical-align: middle; padding: 3px 6px; border: 1px solid gray; <?= $color; ?>\"><?= $value . (($is_win && $value) ? '%' : ''); ?></td>\n <?php } ?>\n </tr>\n <?php } ?>\n </tbody>\n</table>\n\n",
"First, I changed the script to do some more benchmarking (and also do 1000 runs instead of just 1):\n<?php\n\nini_set('display_errors', 1);\nerror_reporting(E_ALL);\n\n// Make a big, honkin test array\n// You may need to adjust this depth to avoid memory limit errors\n$testArray = fillArray(0, 5);\n\n$totalJsonTime = 0;\n$totalSerializeTime = 0;\n$totalJsonWins = 0;\n\nfor ($i = 0; $i < 1000; $i++) {\n // Time json encoding\n $start = microtime(true);\n $json = json_encode($testArray);\n $jsonTime = microtime(true) - $start;\n $totalJsonTime += $jsonTime;\n\n // Time serialization\n $start = microtime(true);\n $serial = serialize($testArray);\n $serializeTime = microtime(true) - $start;\n $totalSerializeTime += $serializeTime;\n\n if ($jsonTime < $serializeTime) {\n $totalJsonWins++;\n }\n}\n\n$totalSerializeWins = 1000 - $totalJsonWins;\n\n// Compare them\nif ($totalJsonTime < $totalSerializeTime) {\n printf(\"json_encode() (wins: $totalJsonWins) was roughly %01.2f%% faster than serialize()\\n\", ($totalSerializeTime / $totalJsonTime - 1) * 100);\n} else {\n printf(\"serialize() (wins: $totalSerializeWins) was roughly %01.2f%% faster than json_encode()\\n\", ($totalJsonTime / $totalSerializeTime - 1) * 100);\n}\n\n$totalJsonTime = 0;\n$totalJson2Time = 0;\n$totalSerializeTime = 0;\n$totalJsonWins = 0;\n\nfor ($i = 0; $i < 1000; $i++) {\n // Time json decoding\n $start = microtime(true);\n $orig = json_decode($json, true);\n $jsonTime = microtime(true) - $start;\n $totalJsonTime += $jsonTime;\n\n $start = microtime(true);\n $origObj = json_decode($json);\n $jsonTime2 = microtime(true) - $start;\n $totalJson2Time += $jsonTime2;\n\n // Time serialization\n $start = microtime(true);\n $unserial = unserialize($serial);\n $serializeTime = microtime(true) - $start;\n $totalSerializeTime += $serializeTime;\n\n if ($jsonTime < $serializeTime) {\n $totalJsonWins++;\n }\n}\n\n$totalSerializeWins = 1000 - $totalJsonWins;\n\n\n// Compare them\nif ($totalJsonTime < $totalSerializeTime) {\n printf(\"json_decode() was roughly %01.2f%% faster than unserialize()\\n\", ($totalSerializeTime / $totalJsonTime - 1) * 100);\n} else {\n printf(\"unserialize() (wins: $totalSerializeWins) was roughly %01.2f%% faster than json_decode()\\n\", ($totalJsonTime / $totalSerializeTime - 1) * 100);\n}\n\n// Compare them\nif ($totalJson2Time < $totalSerializeTime) {\n printf(\"json_decode() was roughly %01.2f%% faster than unserialize()\\n\", ($totalSerializeTime / $totalJson2Time - 1) * 100);\n} else {\n printf(\"unserialize() (wins: $totalSerializeWins) was roughly %01.2f%% faster than array json_decode()\\n\", ($totalJson2Time / $totalSerializeTime - 1) * 100);\n}\n\nfunction fillArray( $depth, $max ) {\n static $seed;\n if (is_null($seed)) {\n $seed = array('a', 2, 'c', 4, 'e', 6, 'g', 8, 'i', 10);\n }\n if ($depth < $max) {\n $node = array();\n foreach ($seed as $key) {\n $node[$key] = fillArray($depth + 1, $max);\n }\n return $node;\n }\n return 'empty';\n}\n\nI used this build of PHP 7:\n\nPHP 7.0.14 (cli) (built: Jan 18 2017 19:13:23) ( NTS ) Copyright (c)\n 1997-2016 The PHP Group Zend Engine v3.0.0, Copyright (c) 1998-2016\n Zend Technologies\n with Zend OPcache v7.0.14, Copyright (c) 1999-2016, by Zend Technologies\n\nAnd my results were:\n\nserialize() (wins: 999) was roughly 10.98% faster than json_encode()\n unserialize() (wins: 987) was roughly 33.26% faster than json_decode()\n unserialize() (wins: 987) was roughly 48.35% faster than array\n json_decode()\n\nSo clearly, serialize/unserialize is the fastest 
method, while json_encode/decode is the most portable.\nIf you consider a scenario where you read/write serialized data 10x or more often than you need to send to or receive from a non-PHP system, you are STILL better off to use serialize/unserialize and have it json_encode or json_decode prior to serialization in terms of time.\n",
"Before you make your final decision, be aware that the JSON format is not safe for associative arrays - json_decode() will return them as objects instead:\n$config = array(\n 'Frodo' => 'hobbit',\n 'Gimli' => 'dwarf',\n 'Gandalf' => 'wizard',\n );\nprint_r($config);\nprint_r(json_decode(json_encode($config)));\n\nOutput is:\nArray\n(\n [Frodo] => hobbit\n [Gimli] => dwarf\n [Gandalf] => wizard\n)\nstdClass Object\n(\n [Frodo] => hobbit\n [Gimli] => dwarf\n [Gandalf] => wizard\n)\n\n",
"just an fyi -- if you want to serialize your data to something easy to read and understand like JSON but with more compression and higher performance, you should check out messagepack.\n",
"THX - for this benchmark code:\nMy results on array I use for configuration are as fallows:\nJSON encoded in 0.0031511783599854 seconds\nPHP serialized in 0.0037961006164551 seconds\njson_encode() was roughly 20.47% faster than serialize()\nJSON encoded in 0.0070841312408447 seconds\nPHP serialized in 0.0035839080810547 seconds\nunserialize() was roughly 97.66% faster than json_encode()\nSo - test it on your own data.\n",
"JSON is better if you want to backup Data and restore it on a different machine or via FTP.\nFor example with serialize if you store data on a Windows server, download it via FTP and restore it on a Linux one it could not work any more due to the charachter re-encoding, because serialize stores the length of the strings and in the Unicode > UTF-8 transcoding some 1 byte charachter could became 2 bytes long making the algorithm crash.\n",
"If to summ up what people say here, json_decode/encode seems faster than serialize/unserialize BUT\nIf you do var_dump the type of the serialized object is changed.\nIf for some reason you want to keep the type, go with serialize!\n(try for example stdClass vs array)\nserialize/unserialize:\nArray cache:\narray (size=2)\n 'a' => string '1' (length=1)\n 'b' => int 2\nObject cache:\nobject(stdClass)[8]\n public 'field1' => int 123\nThis cache:\nobject(Controller\\Test)[8]\n protected 'view' => \n\njson encode/decode\nArray cache:\nobject(stdClass)[7]\n public 'a' => string '1' (length=1)\n public 'b' => int 2\nObject cache:\nobject(stdClass)[8]\n public 'field1' => int 123\nThis cache:\nobject(stdClass)[8]\n\nAs you can see the json_encode/decode converts all to stdClass, which is not that good, object info lost... So decide based on needs, especially if it is not only arrays...\n",
"Based on the information provided, it would be more efficient to store the array as JSON in this scenario. JSON is a widely-used and standardized format that is easy to read and can be easily converted to and from PHP arrays. Additionally, as you mentioned, json_decode is faster than unserialize in newer versions of PHP.\nOne potential pitfall of using JSON is that it is not as efficient in terms of space usage as serialized arrays. However, in most cases this should not be a significant concern.\nAs for benchmarks, it is difficult to provide specific numbers without conducting tests on your specific data and use case. However, in general, using JSON for storing and manipulating arrays in PHP is considered to be more efficient and easier to work with than serialized arrays.\n"
] | [
595,
258,
65,
61,
28,
19,
13,
11,
8,
8,
6,
6,
3,
3,
2,
1,
0,
0,
0,
0
] | [
"I would suggest you to use Super Cache, which is a file cache mechanism which won't use json_encode or serialize. It is simple to use and really fast compared to other PHP Cache mechanism. \nhttps://packagist.org/packages/smart-php/super-cache\nEx: \n<?php\nrequire __DIR__.'/vendor/autoload.php';\nuse SuperCache\\SuperCache as sCache;\n\n//Saving cache value with a key\n// sCache::cache('<key>')->set('<value>');\nsCache::cache('myKey')->set('Key_value');\n\n//Retrieving cache value with a key\necho sCache::cache('myKey')->get();\n?>\n\n"
] | [
-4
] | [
"arrays",
"json",
"performance",
"php",
"serialization"
] | stackoverflow_0000804045_arrays_json_performance_php_serialization.txt |
Q:
How to count cells that are within 2 values, in a range of cells in a pandas dataframe?
I have a dataframe that looks like this:
col1
0 10
1 5
2 8
3 12
4 13
5 6
6 9
7 11
8 10
9 3
10 21
11 18
12 14
13 16
14 30
15 45
16 31
17 40
18 38
For each cell in 'col1' I calculate a range of values:
df['df_min'] = df.col1 - df.col1 * 0.2
df['df_max'] = df.col1 + df.col1 * 0.2
For each cell there's a range; I would like to count how many of the past xx cells (3 in this example) in 'col1' fall within that range, but without a loop, as a loop takes a very long time with my actual model.
I'm trying to achieve this result:
col1 df_min df_max counter
0 10 8.0 12.0 -1
1 5 4.0 6.0 -1
2 8 6.4 9.6 -1
3 12 9.6 14.4 1
4 13 10.4 15.6 1
5 6 4.8 7.2 0
6 9 7.2 10.8 0
7 11 8.8 13.2 2
8 10 8.0 12.0 2
9 3 2.4 3.6 0
10 21 16.8 25.2 0
11 18 14.4 21.6 1
12 14 11.2 16.8 0
13 16 12.8 19.2 2
14 30 24.0 36.0 0
15 45 36.0 54.0 0
16 31 24.8 37.2 1
17 40 32.0 48.0 1
18 38 30.4 45.6 3
Here's the (messy) code that I could come up with, but I'd really like a faster solution, if possible. Any help would be gladly appreciated.
df = pd.DataFrame({"col1":[10, 5, 8, 12, 13, 6, 9, 11, 10, 3, 21, 18, 14, 16, 30, 45, 31, 40, 38]})
back = 3 # numbers of cells to check back
df['df_min'] = df.col1 - df.col1 * 0.2
df['df_max'] = df.col1 + df.col1 * 0.2
l = []
for window in df.col1.rolling(window=back+1, center=False, closed='right'):
if window.empty:
pass
else:
a = window.iloc[-1]
range_min = a - a * 0.2
range_max = a + a * 0.2
c = 0
if len(window) == back+1:
for b in window:
if (b >= range_min and b <= range_max):
c += 1
c = c-1 # subtract 1 because the window includes the tested value, which is always true
l.append(c)
df1 = pd.DataFrame(l, columns=['counter'])
df = df.join(df1)
print(df)
A:
Loop with vectorized operations
Code
df['df_min'] = df.col1 - df.col1 * 0.2
df['df_max'] = df.col1 + df.col1 * 0.2
n = 3
s = pd.Series(dtype='float')
for i in range(0, n):
s1 = df.col1.shift(i+1).ge(df['df_min']) & df.col1.shift(i+1).le(df['df_max'])
s = s.add(s1, fill_value=0)
s[:n] = -1
df['counter'] = s
output(df):
col1 df_min df_max counter
0 10 8.0 12.0 -1.0
1 5 4.0 6.0 -1.0
2 8 6.4 9.6 -1.0
3 12 9.6 14.4 1.0
4 13 10.4 15.6 1.0
5 6 4.8 7.2 0.0
6 9 7.2 10.8 0.0
7 11 8.8 13.2 2.0
8 10 8.0 12.0 2.0
9 3 2.4 3.6 0.0
10 21 16.8 25.2 0.0
11 18 14.4 21.6 1.0
12 14 11.2 16.8 0.0
13 16 12.8 19.2 2.0
14 30 24.0 36.0 0.0
15 45 36.0 54.0 0.0
16 31 24.8 37.2 1.0
17 40 32.0 48.0 1.0
18 38 30.4 45.6 3.0
I don't know your dataset. However, when testing with 1,000,000 rows and n = 10, this code takes only 0.4 sec.
test example
import numpy as np
df = pd.DataFrame(np.random.randint(20,100, 1000000), columns=['col1'])
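If you want to reproduce that timing yourself, a minimal sketch is below (it simply wraps the loop from this answer around the synthetic frame above; the 0.2 bounds and n = 10 are assumptions taken from the example, so adjust them to your data):
import time

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randint(20, 100, 1_000_000), columns=['col1'])
df['df_min'] = df.col1 - df.col1 * 0.2
df['df_max'] = df.col1 + df.col1 * 0.2

n = 10
start = time.perf_counter()
s = pd.Series(dtype='float')
for i in range(n):
    shifted = df.col1.shift(i + 1)
    s = s.add(shifted.ge(df['df_min']) & shifted.le(df['df_max']), fill_value=0)
s[:n] = -1
df['counter'] = s
print(f"{len(df):,} rows, n = {n}: {time.perf_counter() - start:.2f} s")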
A:
Another possible solution, based on pandas.core.window.rolling.Rolling.apply. From the tests I did on my computer, this solution appears to be very fast and efficient, even for large datasets.
n = 3
df['counter'] = (df['col1'].rolling(n+1)
.apply(lambda x:
np.sum((x[:n] >= (x[n] * 0.8)) & (x[:n] <= (x[n] * 1.2))),
raw=True).fillna(-1).astype(int))
Output:
col1 df_min df_max counter
0 10 8.0 12.0 -1
1 5 4.0 6.0 -1
2 8 6.4 9.6 -1
3 12 9.6 14.4 1
4 13 10.4 15.6 1
5 6 4.8 7.2 0
6 9 7.2 10.8 0
7 11 8.8 13.2 2
8 10 8.0 12.0 2
9 3 2.4 3.6 0
10 21 16.8 25.2 0
11 18 14.4 21.6 1
12 14 11.2 16.8 0
13 16 12.8 19.2 2
14 30 24.0 36.0 0
15 45 36.0 54.0 0
16 31 24.8 37.2 1
17 40 32.0 48.0 1
18 38 30.4 45.6 3
| How to count cells that are within 2 values, in a range of cells in a pandas dataframe? | I have a dataframe that looks like that:
col1
0 10
1 5
2 8
3 12
4 13
5 6
6 9
7 11
8 10
9 3
10 21
11 18
12 14
13 16
14 30
15 45
16 31
17 40
18 38
For each cell in 'col1' I calculate a range of values:
df['df_min'] = df.col1 - df.col1 * 0.2
df['df_max'] = df.col1 + df.col1 * 0.2
For each cell there's a range, I would like to count how many cells in 'col1' in the past xx cells (3 in this example) are within that range, but without a loop as it takes a very long time with my actual model.
I'm trying to achieve this result:
col1 df_min df_max counter
0 10 8.0 12.0 -1
1 5 4.0 6.0 -1
2 8 6.4 9.6 -1
3 12 9.6 14.4 1
4 13 10.4 15.6 1
5 6 4.8 7.2 0
6 9 7.2 10.8 0
7 11 8.8 13.2 2
8 10 8.0 12.0 2
9 3 2.4 3.6 0
10 21 16.8 25.2 0
11 18 14.4 21.6 1
12 14 11.2 16.8 0
13 16 12.8 19.2 2
14 30 24.0 36.0 0
15 45 36.0 54.0 0
16 31 24.8 37.2 1
17 40 32.0 48.0 1
18 38 30.4 45.6 3
Here's the (messy) code that I could come up with, but I'd really like a faster solution, if possible. Any help would be gladly appreciated.
df = pd.DataFrame({"col1":[10, 5, 8, 12, 13, 6, 9, 11, 10, 3, 21, 18, 14, 16, 30, 45, 31, 40, 38]})
back = 3 # numbers of cells to check back
df['df_min'] = df.col1 - df.col1 * 0.2
df['df_max'] = df.col1 + df.col1 * 0.2
l = []
for window in df.col1.rolling(window=back+1, center=False, closed='right'):
if window.empty:
pass
else:
a = window.iloc[-1]
range_min = a - a * 0.2
range_max = a + a * 0.2
c = 0
if len(window) == back+1:
for b in window:
if (b >= range_min and b <= range_max):
c += 1
c = c-1 # substract 1 because window includes the tested value which is always true
l.append(c)
df1 = pd.DataFrame(l, columns=['counter'])
df = df.join(df1)
print(df)
| [
"loop with vectorization operation\nCode\ndf['df_min'] = df.col1 - df.col1 * 0.2\ndf['df_max'] = df.col1 + df.col1 * 0.2\nn = 3\ns = pd.Series(dtype='float')\nfor i in range(0, n):\n s1 = df.col1.shift(i+1).ge(df['df_min']) & df.col1.shift(i+1).le(df['df_max'])\n s = s.add(s1, fill_value=0)\ns[:n] = -1\ndf['counter'] = s\n\noutput(df):\n col1 df_min df_max counter\n0 10 8.0 12.0 -1.0\n1 5 4.0 6.0 -1.0\n2 8 6.4 9.6 -1.0\n3 12 9.6 14.4 1.0\n4 13 10.4 15.6 1.0\n5 6 4.8 7.2 0.0\n6 9 7.2 10.8 0.0\n7 11 8.8 13.2 2.0\n8 10 8.0 12.0 2.0\n9 3 2.4 3.6 0.0\n10 21 16.8 25.2 0.0\n11 18 14.4 21.6 1.0\n12 14 11.2 16.8 0.0\n13 16 12.8 19.2 2.0\n14 30 24.0 36.0 0.0\n15 45 36.0 54.0 0.0\n16 31 24.8 37.2 1.0\n17 40 32.0 48.0 1.0\n18 38 30.4 45.6 3.0\n\n\ni don know your dataset. However, when im testing with 1,000,000 rows and n = 10, this code takes only 0.4sec.\n\ntest example\nimport numpy as np\ndf = pd.DataFrame(np.random.randint(20,100, 1000000), columns=['col1'])\n\n",
"Another possible solution, based on pandas.core.window.rolling.Rolling.apply. From the tests I did on my computer, this solution appears to be very fast and efficient, even for large datasets.\nn = 3\n\ndf['counter'] = (df['col1'].rolling(n+1)\n .apply(lambda x: \n np.sum((x[:n] >= (x[n] * 0.8)) & (x[:n] <= (x[n] * 1.2))), \n raw=True).fillna(-1).astype(int))\n\nOutput:\n col1 df_min df_max counter\n0 10 8.0 12.0 -1\n1 5 4.0 6.0 -1\n2 8 6.4 9.6 -1\n3 12 9.6 14.4 1\n4 13 10.4 15.6 1\n5 6 4.8 7.2 0\n6 9 7.2 10.8 0\n7 11 8.8 13.2 2\n8 10 8.0 12.0 2\n9 3 2.4 3.6 0\n10 21 16.8 25.2 0\n11 18 14.4 21.6 1\n12 14 11.2 16.8 0\n13 16 12.8 19.2 2\n14 30 24.0 36.0 0\n15 45 36.0 54.0 0\n16 31 24.8 37.2 1\n17 40 32.0 48.0 1\n18 38 30.4 45.6 3\n\n"
] | [
1,
0
] | [] | [] | [
"dataframe",
"for_loop",
"optimization",
"pandas",
"python"
] | stackoverflow_0074673689_dataframe_for_loop_optimization_pandas_python.txt |
Q:
Helm has/merge option for overrides
I am trying to create a template whereby values have a default set of service names, unless they are overridden. Example:
default:
service:
- name: nginx1
service: "dev-nginx1"
port: 8080
- name: nginx2
service: "dev-nginx2"
port: 8080
identifiers:
- identifier: "cust1"
- identifier: "cust2"
- identifier: "cust3"
overrides:
identifiers:
- identifier: "cust2"
service:
- name: nginx4
service: "cust2-nginx4"
port: 8080
- name: nginx12
service: "cust2-nginx12"
port: 8080
Where the above would yield:
---
identifier: cust1
service: dev-nginx
port: 8080
service: dev-nginx2
port: 8080
---
identifier: cust2
service: cust2-nginx4
port: 8080
service: cust2-nginx12
port: 8080
---
identifier: cust3
service: dev-nginx
port: 8080
service: dev-nginx2
port: 8080
I have tried the following, but I'm getting in a mess with the iterations in the wrong place. Is there an easier way to accomplish this in Helm with some sort of merge function?
{{- range $key, $values := $.Values.default.identifiers -}}
{{- range $overrideKey, $overrideValues := $.Values.overrides.identifiers -}}
{{ if eq $values.identifier $overrideValues.identifier }}
---
identifier: {{ $values.identifier }}
{{- range $value := $overrideValues.service }}
service: {{ $value.name }}
port: {{ $value.port }}
{{- end }}
{{ else }}
---
identifier: {{ $values.identifier }}
{{- range $value := $overrideValues.service }}
service: {{ $value.name }}
port: {{ $value.port }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
A:
Here's how you can modify your Helm template to achieve the desired output:
{{- range $identifier := .Values.default.identifiers }}
---
identifier: {{ $identifier.identifier }}
{{- $services := .Values.default.service }}
{{- range $override := .Values.overrides.identifiers }}
{{- if eq $identifier.identifier $override.identifier }}
{{- $services = $override.service }}
{{- end }}
{{- end }}
{{- range $service := $services }}
service: {{ $service.name }}
port: {{ $service.port }}
{{- end }}
{{- end }}
This template iterates over the list of identifiers in the default values. For each identifier, it first sets the list of services to the default list of services. It then checks the overrides list of identifiers to see if there is an override for the current identifier. If there is, it sets the list of services to the overridden list of services. Finally, it iterates over the list of services and outputs the name and port values for each service.
| Helm has/merge option for overrides | I am trying to create a template whereby values have a default set of service names, unless they are overriden. example:
default:
service:
- name: nginx1
service: "dev-nginx1"
port: 8080
- name: nginx2
service: "dev-nginx2"
port: 8080
identifiers:
- identifier: "cust1"
- identifier: "cust2"
- identifier: "cust3"
overrides:
identifiers:
- identifier: "cust2"
service:
- name: nginx4
service: "cust2-nginx4"
port: 8080
- name: nginx12
service: "cust2-nginx12"
port: 8080
Where the above would yield:
---
identifier: cust1
service: dev-nginx
port: 8080
service: dev-nginx2
port: 8080
---
identifier: cust2
service: cust2-nginx4
port: 8080
service: cust2-nginx12
port: 8080
---
identifier: cust3
service: dev-nginx
port: 8080
service: dev-nginx2
port: 8080
I have tried the following, but i'm getting in a mess with the iterations in the incorrect place. Is there an easier way to accomplish this in helm with some sort of merge function?
{{- range $key, $values := $.Values.default.identifiers -}}
{{- range $overrideKey, $overrideValues := $.Values.overrides.identifiers -}}
{{ if eq $values.identifier $overrideValues.identifier }}
---
identifier: {{ $values.identifier }}
{{- range $value := $overrideValues.service }}
service: {{ $value.name }}
port: {{ $value.port }}
{{- end }}
{{ else }}
---
identifier: {{ $values.identifier }}
{{- range $value := $overrideValues.service }}
service: {{ $value.name }}
port: {{ $value.port }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
| [
"Here's how you can modify your Helm template to achieve the desired output:\n{{- range $identifier := .Values.default.identifiers }}\n---\nidentifier: {{ $identifier.identifier }}\n{{- $services := .Values.default.service }}\n{{- range $override := .Values.overrides.identifiers }}\n{{- if eq $identifier.identifier $override.identifier }}\n{{- $services = $override.service }}\n{{- end }}\n{{- end }}\n{{- range $service := $services }}\nservice: {{ $service.name }}\nport: {{ $service.port }}\n{{- end }}\n{{- end }}\n\nThis template iterates over the list of identifiers in the default values. For each identifier, it first sets the list of services to the default list of services. It then checks the overrides list of identifiers to see if there is an override for the current identifier. If there is, it sets the list of services to the overridden list of services. Finally, it iterates over the list of services and outputs the name and port values for each service.\n"
] | [
0
] | [] | [] | [
"kubernetes_helm",
"sprig_template_functions"
] | stackoverflow_0074675356_kubernetes_helm_sprig_template_functions.txt |
Q:
How to get the contents of last line in tkinter text widget (Python 3)
I am working on a virtual console, which would use the system's builtin commands, perform the action, and display the output results on the next line in the console. This is all working, but how do I get the contents of the last line, and only the last line, in the tkinter text widget? Thanks in advance. I am working in Python 3.
I have tried using text.get(text.linestart, text.lineend), to no avail. Have these been deprecated? It spits out an error saying AttributeError: 'Text' object has no attribute 'linestart'
A:
You can apply modifiers to the text widget indices, such as linestart and lineend, as well as adding and subtracting characters. The index after the last character is "end".
Putting that all together, you can get the start of the last line with "end-1c linestart".
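For example, a minimal self-contained sketch (the inserted text is just sample data):
import tkinter as tk

root = tk.Tk()
text = tk.Text(root)
text.pack()
text.insert("end", "first line\nsecond line\nlast line")

# "end" sits just past the newline tkinter keeps at the very end of the widget,
# so back up one character and snap to the line boundaries to get the last line.
last_line = text.get("end-1c linestart", "end-1c lineend")
print(last_line)  # -> last line

root.mainloop()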
A:
The Text widget has a see(index) method.
text.see(END) will scroll the text to the last line.
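For instance, a small sketch (the loop just fills the widget with sample lines):
import tkinter as tk

root = tk.Tk()
text = tk.Text(root, height=10)
text.pack()
for i in range(100):
    text.insert("end", f"line {i}\n")

text.see("end")  # tk.END is the same "end" index; scrolls so the last line is visible
root.mainloop()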
A:
To get information from the last line you can use
text.get("end-1c linestart", "end-1c lineend")
| How to get the contents of last line in tkinter text widget (Python 3) | I am working on a virtual console, which would use the systems builtin commands and then do the action and display output results on next line in console. This is all working, but how do I get the contents of the last line, and only the last line in the tkinter text widget? Thanks in advance. I am working in python 3.
I have treed using text.get(text.linestart, text.lineend) To no avail. Have these been deprecated? It spits out an error saying that AttributeError: 'Text' object has no attribute 'linestart'
| [
"You can apply modifiers to the text widget indicies, such as linestart and lineend as well as adding and subtracting characters. The index after the last character is \"end\".\nPutting that all together, you can get the start of the last line with \"end-1c linestart\".\n",
"Test widget has a see(index) method.\ntext.see(END) will scroll the text to the last line. \n",
"To get information from the last line you can use\ntext.get(\"end-1c linestart\", \"end-1c lineend\")\n"
] | [
1,
0,
0
] | [] | [] | [
"console",
"python",
"python_3.x",
"tkinter"
] | stackoverflow_0040251259_console_python_python_3.x_tkinter.txt |
Q:
Disable ssl in Keycloak 20.0.1
I'm trying to disable https:
I started the server using bin/kc.sh start-dev
And then I tried to disable ssl using:
root@server:/opt/keycloak#
root@server:/opt/keycloak# /opt/keycloak/bin/kcadm.sh config credentials --server http://localhost:8080/auth --realm master --user admin --password admin
Logging into http://locahost:8080/auth as user admin of realm master
Enter password: *****
null [RESTEASY003210: Could not find resource for full path: http://localhost:8080/auth/realms/master/protocol/openid-connect/token]
root@server:/opt/keycloak#
Do you know what is the proper way to execute the command?
A:
The command you are trying to execute, /opt/keycloak/bin/kcadm.sh config credentials, is used to set the credentials for the Keycloak server administrator. This command is typically used to change the password for the administrator user, but it can also be used to set the server URL and the realm name.
The command you have provided has a few issues that are preventing it from running properly. First, the URL for the Keycloak server is incorrect. The URL you have provided, http://localhost:8080/auth, is missing the /admin path at the end. The correct URL for the Keycloak server is http://localhost:8080/auth/admin.
Additionally, the command you have provided is using the wrong option to specify the server URL. The correct option to use is --server-url, not --server.
Here is the correct command to use to set the credentials for the Keycloak server administrator:
/opt/keycloak/bin/kcadm.sh config credentials --server-url http://localhost:8080/auth/admin --realm master --user admin --password admin
This command will set the server URL, realm name, and administrator user password for the Keycloak server. It will also log you in as the administrator user so that you can perform other administrative tasks.
Once you have logged in as the administrator, you can use the Keycloak web interface or the kcadm.sh command-line tool to disable HTTPS for your Keycloak server. The specific steps for doing this will depend on your Keycloak version and configuration, so it is best to consult the Keycloak documentation or seek assistance from the Keycloak community for more information.
| Disable ssl in Keycloak 20.0.1 | I'm trying to disable https:
I started the sever using bin/kc.sh start-dev
And then I tried to disable ssl using:
root@server:/opt/keycloak#
root@server:/opt/keycloak# /opt/keycloak/bin/kcadm.sh config credentials --server http://localhost:8080/auth --realm master --user admin –-password admin
Logging into http://locahost:8080/auth as user admin of realm master
Enter password: *****
null [RESTEASY003210: Could not find resource for full path: http://localhost:8080/auth/realms/master/protocol/openid-connect/token]
root@server:/opt/keycloak#
Do you know what is the proper way to execute the command?
| [
"The command you are trying to execute, /opt/keycloak/bin/kcadm.sh config credentials, is used to set the credentials for the Keycloak server administrator. This command is typically used to change the password for the administrator user, but it can also be used to set the server URL and the realm name.\nThe command you have provided has a few issues that are preventing it from running properly. First, the URL for the Keycloak server is incorrect. The URL you have provided, http://localhost:8080/auth, is missing the /admin path at the end. The correct URL for the Keycloak server is http://localhost:8080/auth/admin.\nAdditionally, the command you have provided is using the wrong option to specify the server URL. The correct option to use is --server-url, not --server.\nHere is the correct command to use to set the credentials for the Keycloak server administrator:\n/opt/keycloak/bin/kcadm.sh config credentials --server-url http://localhost:8080/auth/admin --realm master --user admin –-password admin\n\nThis command will set the server URL, realm name, and administrator user password for the Keycloak server. It will also log you in as the administrator user so that you can perform other administrative tasks.\nOnce you have logged in as the administrator, you can use the Keycloak web interface or the kcadm.sh command-line tool to disable HTTPS for your Keycloak server. The specific steps for doing this will depend on your Keycloak version and configuration, so it is best to consult the Keycloak documentation or seek assistance from the Keycloak community for more information.\n"
] | [
0
] | [] | [] | [
"keycloak",
"keycloak_rest_api"
] | stackoverflow_0074671439_keycloak_keycloak_rest_api.txt |
Q:
Improve performance of query using multiple join and where condition
I am using the following query,
select opp.amount, opp.close_date, opp.sales_date,
acc.name as acc_name, acc.lct
from account_details acc
inner join opportunity opp
on opp.accountid = acc.id
inner join opportunityItem oli
on oli.opportunityid = opp.id
where opp.created_date BETWEEN '03-01-2021' AND '03-12-2021'
and oli.tier_2 = 'IGT'
and opp.stagename != 'Closed - Lost'
and acc.lct = 'BIN'
and opp.close_date BETWEEN '2021-04-03' AND '2021-07-03'
Following is the query plan generated by postgres,
QUERY PLAN
Gather (cost=2927.46..429741.08 rows=1 width=57) (actual time=83448.632..155379.005 rows=1 loops=1)
Workers Planned: 2
Workers Launched: 2
-> Nested Loop (cost=1927.46..428740.98 rows=1 width=57) (actual time=131285.952..155209.978 rows=0 loops=3)
-> Nested Loop (cost=1927.38..428700.42 rows=10 width=35) (actual time=24114.179..154979.137 rows=61 loops=3)
-> Parallel Bitmap Heap Scan on opportunityItem oli (cost=1927.27..187430.72 rows=97091 width=19) (actual time=88.746..38661.601 rows=78891 loops=3)
Recheck Cond: ((tier_2)::text = 'IGT'::text)
Heap Blocks: exact=36235
-> Bitmap Index Scan on hc_idx_opportunitylineitem_tier_2 (cost=0.00..1915.62 rows=233019 width=0) (actual time=66.354..66.355 rows=236675 loops=1)
Index Cond: ((tier_2)::text = 'IGT'::text)
-> Index Scan using hcu_idx_opportunity_id on opportunity opp (cost=0.11..2.48 rows=1 width=54) (actual time=1.473..1.473 rows=0 loops=236673)
Index Cond: ((id)::text = (oli.opportunityid)::text)
Filter: ((created_date >= '2021-03-01 00:00:00'::timestamp without time zone) AND (created_date <= '2021-03-12 00:00:00'::timestamp without time zone) AND ((stagename)::text <> 'Closed - Lost'::text) AND (close_date >= '2021-04-03'::date) AND (close_date <= '2021-07-03'::date))
Rows Removed by Filter: 1
-> Index Scan using hcu_idx_account_id on account_details acc (cost=0.09..4.06 rows=1 width=60) (actual time=3.761..3.761 rows=0 loops=184)
Index Cond: ((id)::text = (opp.accountid)::text)
Filter: ((lct)::text = 'BIN'::text)
Rows Removed by Filter: 1
Planning Time: 1.267 ms
Execution Time: 155379.113 ms
Following is the definition of the opportunity table,
CREATE TABLE IF NOT EXISTS opportunity
(
partner_account_id character varying(18) COLLATE pg_catalog."default",
cancellation boolean,
close_date date,
accountid character varying(18) COLLATE pg_catalog."default",
opportunity_num character varying(30) COLLATE pg_catalog."default",
stagename character varying(255) COLLATE pg_catalog."default",
created_date timestamp without time zone,
amount double precision,
from_date date,
sales_date date,
id integer NOT NULL DEFAULT nextval('gehcsfdc.opportunity_id_seq'::regclass),
CONSTRAINT opportunity_pkey PRIMARY KEY (id)
)
WITH (
OIDS = FALSE
)
TABLESPACE pg_default;
CREATE INDEX IF NOT EXISTS hc_idx_opportunity_accountid
ON gehcsfdc.opportunity USING btree
(accountid COLLATE pg_catalog."default" ASC NULLS LAST)
TABLESPACE pg_default;
The table currently contains 5,113,390 records
The performance of the query is extremely poor. I am not willing to add indexing on all the columns included in the where clause, as I am unsure what impact it might have on the DMLs.
I have the following questions:
Since we are not selecting anything from the table "opportunityItem", should I move it out of the join clause and into the where clause?
Is there any option to prevent re-evaluation of the condition "Recheck Cond: ((tier_2)::text = 'IGT'::text)"?
Is there any other way to optimize the query?
A:
You need an index on opportunity with a first column of created_date or close_date to help support your query. That's because you do range-matching (where opp.created_date BETWEEN '03-01-2021' AND '03-12-2021') on those two columns.
Try this one:
CREATE INDEX IF NOT EXISTS hc_idx_opportunity_created_etc
ON gehcsfdc.opportunity USING btree (created_date)
INCLUDE (close_date, stagename, accountid);
This will help because PostgreSQL can filter your table with a range scan on created_date, then retrieve the other columns needed to filter and join the results from the index.
You are correct that creating a mess of single-column indexes on a table is generally useless. But creating indexes that support the shapes of your queries remains the best way to make them faster.
| Improve performance of query using multiple join and where condition | I am using the following query,
select opp.amount, opp.close_date, opp.sales_date,
acc.name as acc_name, acc.lct
from account_details acc
inner join opportunity opp
on opp.accountid = acc.id
inner join opportunityItem oli
on oli.opportunityid = opp.id
where opp.created_date BETWEEN '03-01-2021' AND '03-12-2021'
and oli.tier_2 = 'IGT'
and opp.stagename != 'Closed - Lost'
and acc.lct = 'BIN'
and opp.close_date BETWEEN '2021-04-03' AND '2021-07-03'
Following is the query plan generated by postgres,
QUERY PLAN
Gather (cost=2927.46..429741.08 rows=1 width=57) (actual time=83448.632..155379.005 rows=1 loops=1)
Workers Planned: 2
Workers Launched: 2
-> Nested Loop (cost=1927.46..428740.98 rows=1 width=57) (actual time=131285.952..155209.978 rows=0 loops=3)
-> Nested Loop (cost=1927.38..428700.42 rows=10 width=35) (actual time=24114.179..154979.137 rows=61 loops=3)
-> Parallel Bitmap Heap Scan on opportunityItem oli (cost=1927.27..187430.72 rows=97091 width=19) (actual time=88.746..38661.601 rows=78891 loops=3)
Recheck Cond: ((tier_2)::text = 'IGT'::text)
Heap Blocks: exact=36235
-> Bitmap Index Scan on hc_idx_opportunitylineitem_tier_2 (cost=0.00..1915.62 rows=233019 width=0) (actual time=66.354..66.355 rows=236675 loops=1)
Index Cond: ((tier_2)::text = 'IGT'::text)
-> Index Scan using hcu_idx_opportunity_id on opportunity opp (cost=0.11..2.48 rows=1 width=54) (actual time=1.473..1.473 rows=0 loops=236673)
Index Cond: ((id)::text = (oli.opportunityid)::text)
Filter: ((created_date >= '2021-03-01 00:00:00'::timestamp without time zone) AND (created_date <= '2021-03-12 00:00:00'::timestamp without time zone) AND ((stagename)::text <> 'Closed - Lost'::text) AND (close_date >= '2021-04-03'::date) AND (close_date <= '2021-07-03'::date))
Rows Removed by Filter: 1
-> Index Scan using hcu_idx_account_id on account_details acc (cost=0.09..4.06 rows=1 width=60) (actual time=3.761..3.761 rows=0 loops=184)
Index Cond: ((id)::text = (opp.accountid)::text)
Filter: ((lct)::text = 'BIN'::text)
Rows Removed by Filter: 1
Planning Time: 1.267 ms
Execution Time: 155379.113 ms
Following is the definition of the opportunity table,
CREATE TABLE IF NOT EXISTS opportunity
(
partner_account_id character varying(18) COLLATE pg_catalog."default",
cancellation boolean,
close_date date,
accountid character varying(18) COLLATE pg_catalog."default",
opportunity_num character varying(30) COLLATE pg_catalog."default",
stagename character varying(255) COLLATE pg_catalog."default",
created_date timestamp without time zone,
amount double precision,
from_date date,
sales_date date,
id integer NOT NULL DEFAULT nextval('gehcsfdc.opportunity_id_seq'::regclass),
CONSTRAINT opportunity_pkey PRIMARY KEY (id)
)
WITH (
OIDS = FALSE
)
TABLESPACE pg_default;
CREATE INDEX IF NOT EXISTS hc_idx_opportunity_accountid
ON gehcsfdc.opportunity USING btree
(accountid COLLATE pg_catalog."default" ASC NULLS LAST)
TABLESPACE pg_default;
The table currently contains 51,13,390 records
The performance of the query is extremely poor. I am not willing to add indexing on all the columns included in the where clause, as I am unsure what impact it might have on the DMLs.
I have the following queries
As we are not selecting anything from the table "opportunityItem", should I move it from join clause and put inside where clause
Is there any option to prevent re-evaluation of the condition "Recheck Cond: ((tier_2)::text = 'IGT'::text)"
any other solution to optimize the query
| [
"You need an index on opportunity with a first column of created_date or close_date to help support your query. That's because you do range-matching (where opp.created_date BETWEEN '03-01-2021' AND '03-12-2021') on those two columns.\nTry this one:\nCREATE INDEX IF NOT EXISTS hc_idx_opportunity_created_etc\n ON gehcsfdc.opportunity USING btree (created_date)\n INCLUDE (close_date, stagename, accountid);\n\nThis will help because PostgreSQL can filter your table with a range scan on created_date, then retrieve the other columns needed to filter and join the results from the index.\nYou are correct that creating a mess of single-column indexes on a table is generally useless. But creating indexes that support the shapes of your queries remains the best way to make them faster.\n"
] | [
0
] | [] | [] | [
"postgresql",
"query_optimization"
] | stackoverflow_0074652738_postgresql_query_optimization.txt |
Q:
How to rename a duplicate file in same path when uploading it?
I'm trying to upload some files, but when I upload a file that already exists in the path, it overwrites the existing file. I'm trying to get the files renamed if a file with the same name exists in the path, such that if file.pdf gets uploaded, it gets renamed to file(1).pdf when a file named file.pdf already exists.
Code
public async Task OnInputFile(InputFileChangeEventArgs e)
{
selectedFiles = e.GetMultipleFiles();
Message = $"{selectedFiles.Count} file(s) selected";
this.StateHasChanged();
foreach (var file in selectedFiles)
{
Stream stream = file.OpenReadStream(maxAllowedSize: 10_000_000);
var path = Path.Combine(Directory.GetCurrentDirectory(), "FilePath", file.Name);
FileStream fs = File.Create(path);
await stream.CopyToAsync(fs);
fs.Close();
stream.Close();
}
Message = $"{selectedFiles.Count} file(s) uploaded on server";
this.StateHasChanged();
}
Can anyone help me resolve this problem?
A:
Yes, you can modify your code to check if the file already exists in the path and rename the file if necessary. Here is one way you can do this:
public async Task OnInputFile(InputFileChangeEventArgs e)
{
selectedFiles = e.GetMultipleFiles();
Message = $"{selectedFiles.Count} file(s) selected";
this.StateHasChanged();
foreach (var file in selectedFiles)
{
Stream stream = file.OpenReadStream(maxAllowedSize: 10_000_000);
var path = Path.Combine(Directory.GetCurrentDirectory(), "FilePath", file.Name);
int i = 1;
while (File.Exists(path))
{
// File with this name already exists.
// Rename the file by adding a number in parentheses at the end of the file name.
string newPath = Path.Combine(Directory.GetCurrentDirectory(), "FilePath",
file.Name.Insert(file.Name.LastIndexOf('.'), $"({i})"));
i++;
path = newPath;
}
FileStream fs = File.Create(path);
await stream.CopyToAsync(fs);
fs.Close();
stream.Close();
}
Message = $"{selectedFiles.Count} file(s) uploaded on server";
this.StateHasChanged();
}
In this code, we added a while loop that checks if the file already exists in the path. If the file exists, we rename the file by adding a number in parentheses at the end of the file name. For example, if file.pdf already exists, the new file will be named file(1).pdf. If file(1).pdf already exists, the new file will be named file(2).pdf, and so on.
This should solve your problem and prevent files from being overwritten.
A:
To prevent the files from being overwritten, you can check if the file already exists in the target directory before saving it. If the file exists, you can rename the file by appending a number to the file name before saving it. Here's one way you can do that:
public async Task OnInputFile(InputFileChangeEventArgs e)
{
selectedFiles = e.GetMultipleFiles();
Message = $"{selectedFiles.Count} file(s) selected";
this.StateHasChanged();
// Create a list to store the new file names
List<string> newFileNames = new List<string>();
// Loop through the selected files
foreach (var file in selectedFiles)
{
// Open the file stream
Stream stream = file.OpenReadStream(maxAllowedSize: 10_000_000);
// Get the file name and file extension
string fileName = Path.GetFileNameWithoutExtension(file.Name);
string fileExt = Path.GetExtension(file.Name);
// Create a new file name by appending a number to the original file name
// if a file with the same name already exists in the target directory
string newFileName = fileName + fileExt;
int counter = 1;
while (File.Exists(Path.Combine(Directory.GetCurrentDirectory(), "FilePath", newFileName)))
{
newFileName = fileName + $" ({counter})" + fileExt;
counter++;
}
// Add the new file name to the list
newFileNames.Add(newFileName);
// Save the file with the new file name
var path = Path.Combine(Directory.GetCurrentDirectory(), "FilePath", newFileName);
FileStream fs = File.Create(path);
await stream.CopyToAsync(fs);
fs.Close();
stream.Close();
}
// Update the message with the new file names
Message = $"{selectedFiles.Count} file(s) uploaded on server: {string.Join(", ", newFileNames)}";
this.StateHasChanged();
}
This code checks if a file with the same name already exists in the target directory. If it does, it creates a new file name by appending a number in parentheses to the original file name. For example, if file.pdf already exists, the new file name will be file (1).pdf, and if file (1).pdf already exists, the new file name will be file (2).pdf, and so on. It then saves the file with the new file name.
A:
I think what you need to do is run a check to see if the file aready exists in the path first. After you've built your path variable, you can check it by adding the following before calling File.Create(path):
if (File.Exists(path))
{
// Change the File path here
path = Path.Combine(Directory.GetCurrentDirectory(), "FilePath", <<SOMEPREFIXHERE>>, file.Name);
}
I would maybe recommend using a datestamp in the new version, as this would offer the most uniqueness and avoid a potential situation where the copy has already been uploaded.
As a better option, you may prefer to use a while loop instead of the if. Let's say you're using an index as the prefix, and you already have the base copy and one new version; the loop will keep incrementing the prefix integer until you end up on a version that doesn't exist:
var fileVersion = 1;

// Keep incrementing the prefix until the resulting path doesn't exist yet
while (File.Exists(path))
{
    path = Path.Combine(Directory.GetCurrentDirectory(), "FilePath", $"{fileVersion}-{file.Name}");
    fileVersion++;
}
| How to rename a duplicate file in same path when uploading it? | I'm trying to upload some files, but when I upload a file that already exists in the path, it overwrites the existing file. I'm trying to get the files renamed if a file with same name exists in the path, such that if file.pdf gets uploaded, it gets renamed file(1).pdf if there is an existing file that goes by file.pdf.
Code
public async Task OnInputFile(InputFileChangeEventArgs e)
{
selectedFiles = e.GetMultipleFiles();
Message = $"{selectedFiles.Count} file(s) selected";
this.StateHasChanged();
foreach (var file in selectedFiles)
{
Stream stream = file.OpenReadStream(maxAllowedSize: 10_000_000);
var path = Path.Combine(Directory.GetCurrentDirectory(), "FilePath", file.Name);
FileStream fs = File.Create(path);
await stream.CopyToAsync(fs);
fs.Close();
stream.Close();
}
Message = $"{selectedFiles.Count} file(s) uploaded on server";
this.StateHasChanged();
}
Can anyone help me resolve this problem?
| [
"Yes, you can modify your code to check if the file already exists in the path and rename the file if necessary. Here is one way you can do this:\npublic async Task OnInputFile(InputFileChangeEventArgs e)\n{\n selectedFiles = e.GetMultipleFiles();\n Message = $\"{selectedFiles.Count} file(s) selected\";\n this.StateHasChanged();\n\n foreach (var file in selectedFiles)\n {\n Stream stream = file.OpenReadStream(maxAllowedSize: 10_000_000);\n var path = Path.Combine(Directory.GetCurrentDirectory(), \"FilePath\", file.Name);\n int i = 1;\n while (File.Exists(path))\n {\n // File with this name already exists.\n // Rename the file by adding a number in parentheses at the end of the file name.\n string newPath = Path.Combine(Directory.GetCurrentDirectory(), \"FilePath\", \n file.Name.Insert(file.Name.LastIndexOf('.'), $\"({i})\"));\n i++;\n path = newPath;\n }\n FileStream fs = File.Create(path);\n await stream.CopyToAsync(fs);\n\n fs.Close();\n stream.Close();\n}\n\nMessage = $\"{selectedFiles.Count} file(s) uploaded on server\";\nthis.StateHasChanged();\n}\n\nIn this code, we added a while loop that checks if the file already exists in the path. If the file exists, we rename the file by adding a number in parentheses at the end of the file name. For example, if file.pdf already exists, the new file will be named file(1).pdf. If file(1).pdf already exists, the new file will be named file(2).pdf, and so on.\nThis should solve your problem and prevent files from being overwritten.\n",
"To prevent the files from being overwritten, you can check if the file already exists in the target directory before saving it. If the file exists, you can rename the file by appending a number to the file name before saving it. Here's one way you can do that:\npublic async Task OnInputFile(InputFileChangeEventArgs e)\n{\n selectedFiles = e.GetMultipleFiles();\n Message = $\"{selectedFiles.Count} file(s) selected\";\n this.StateHasChanged();\n\n // Create a list to store the new file names\n List<string> newFileNames = new List<string>();\n\n // Loop through the selected files\n foreach (var file in selectedFiles)\n {\n // Open the file stream\n Stream stream = file.OpenReadStream(maxAllowedSize: 10_000_000);\n\n // Get the file name and file extension\n string fileName = Path.GetFileNameWithoutExtension(file.Name);\n string fileExt = Path.GetExtension(file.Name);\n\n // Create a new file name by appending a number to the original file name\n // if a file with the same name already exists in the target directory\n string newFileName = fileName + fileExt;\n int counter = 1;\n while (File.Exists(Path.Combine(Directory.GetCurrentDirectory(), \"FilePath\", newFileName)))\n {\n newFileName = fileName + $\" ({counter})\" + fileExt;\n counter++;\n }\n\n // Add the new file name to the list\n newFileNames.Add(newFileName);\n\n // Save the file with the new file name\n var path = Path.Combine(Directory.GetCurrentDirectory(), \"FilePath\", newFileName);\n FileStream fs = File.Create(path);\n await stream.CopyToAsync(fs);\n\n fs.Close();\n stream.Close();\n }\n\n // Update the message with the new file names\n Message = $\"{selectedFiles.Count} file(s) uploaded on server: {string.Join(\", \", newFileNames)}\";\n this.StateHasChanged();\n}\n\nThis code checks if a file with the same name already exists in the target directory. If it does, it creates a new file name by appending a number in parentheses to the original file name. For example, if file.pdf already exists, the new file name will be file (1).pdf, and if file (1).pdf already exists, the new file name will be file (2).pdf, and so on. It then saves the file with the new file name.\n",
"I think what you need to do is run a check to see if the file aready exists in the path first. After you've built your path variable, you can check it by adding the following before calling File.Create(path):\nif (File.Exists(path))\n{\n // Change the File path here\n path = Path.Combine(Directory.GetCurrentDirectory(), \"FilePath\", <<SOMEPREFIXHERE>>, file.Name);\n}\n\nI would maybe recommend using a datestamp to in the new version, as this would offer the most uniqueness and avoid a potential situation where the copy has already been uploaded.\nAs a better options, you may be better to use a while instead of the if. Let's say you're using a index as the prefix, and you already have the base copy and 1 new version, you'll use the loop to continue incrementing the prefix integer, until you end up on a version that doesn't exist:\nvar fileVersion = 1;\n\nWhile (File.Exists(path))\n{\n path = Path.Combine(Directory.GetCurrentDirectory(), \"FilePath\", $\"{fileVersion}-\", file.Name);\n fileVersion++;\n}\n\n"
] | [
1,
1,
1
] | [] | [] | [
"blazor",
"blazor_server_side"
] | stackoverflow_0074675330_blazor_blazor_server_side.txt |
Q:
Laravel - 9 Vite with vue cli 3 - vue-router error
I can't fix it, please help. What is the issue?
I need help; I can't figure it out.
A:
Run: npm run watch
Then move your files from /resources/js to /resources/js/src.
Then, I suggest you rename your file named route to router.
The name "components" or "views" does not matter, but I would prefer to create a folder named "views" and create "components" in it.
Then you can try imports like these, according to the path you create:
import Home from '../views/index.vue';
or
import Home from '../views/components/home/index.vue';
Finally, don't forget to run: php artisan optimize:clear
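For clarity, a minimal, hedged sketch of what such a router file could look like under resources/js/src — the file layout and component path here are assumptions based on the imports above, not the asker's actual project:
// resources/js/src/router/index.js — minimal vue-router 4 setup (sketch only)
import { createRouter, createWebHistory } from 'vue-router';
import Home from '../views/index.vue';

const router = createRouter({
    history: createWebHistory(),
    routes: [
        { path: '/', name: 'home', component: Home },
    ],
});

export default router;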
 | Laravel - 9 Vite with vue cli 3 - vue-router error | I can't fix it, please help. What is the issue?
I need help; I can't figure it out.
| [
"Run: npm run watch\nThen move your files from /resources/js to /resources/js/src.\nThen, I suggest you make your file named route as a router.\nThe name \"components\" or \"views\" does not matter, but I would prefer to create a folder named \"views\" and create \"components\" in it.\nThen you can try to import like these according to the path you will create:\nimport Home from '../views/index.vue';\n\nor\nimport Home from '../views/components/home/index.vue';\n\nFinally, don't forget to run: php artisan optimize:clear\n"
] | [
0
] | [] | [] | [
"laravel",
"routes",
"vite",
"vue.js",
"vue_router"
] | stackoverflow_0074674920_laravel_routes_vite_vue.js_vue_router.txt |
Q:
ESPAsyncWebServer request->send_P problem
I am doing a simple example using ESPAsyncWebServer on ESP32. In this context I wrote an HTML file (there are a slider and a button) and tested it in a browser until the content looked right. Then I integrated it into the C++ source code for the ESP32. It works and looks as expected until I add a callback (the template processor) to read the value of the slider.
This is the complete source code:
#include <Arduino.h>
#include <WiFi.h>
#include <AsyncTCP.h>
#include <ESPAsyncWebServer.h>
// Replace with your network credentials
const char ssid[] = "Vodafone-A40881218";
const char pswd[] = "rJbFMktHCcqN67Ye";
const int output = 2;
String sliderValue = "0";
// setting PWM properties
const int freq = 5000;
const int ledChannel = 0;
const int resolution = 8;
const char* PARAM_INPUT = "value";
// Create AsyncWebServer object on port 80
AsyncWebServer server(80);
#if 0
const char old_index_html[] PROGMEM = R"rawliteral(
<!DOCTYPE HTML><html>
<head>
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>ESP Web Server</title>
<style>
html {font-family: Arial; display: inline-block; text-align: center;}
h2 {font-size: 2.3rem;}
p {font-size: 1.9rem;}
body {max-width: 400px; margin:0px auto; padding-bottom: 25px;}
.slider { -webkit-appearance: none; margin: 14px; width: 360px; height: 25px; background: #FFD65C; outline: none; -webkit-transition: .2s; transition: opacity .2s;}
.slider::-webkit-slider-thumb {-webkit-appearance: none; appearance: none; width: 35px; height: 35px; background: #003249; cursor: pointer;}
.slider::-moz-range-thumb { width: 35px; height: 35px; background: #003249; cursor: pointer; }
button { border-radius:0.5em;background:#C20000;padding:0.3em 0.3em;width:20%;color:white;font-size:130%; }
.buttons { border-radius:0.5em;background:#C20000;padding:0.3em 0.3em;width:15%;color:white;font-size:80%; }
.buttonsm { border-radius:0.5em;background:#C20000;padding:0.3em 0.3em;width:9%; color:white;font-size:70%; }
.buttonm { border-radius:0.5em;background:#C20000;padding:0.3em 0.3em;width:15%;color:white;font-size:70%; }
.buttonw { border-radius:0.5em;background:#C20000;padding:0.3em 0.3em;width:40%;color:white;font-size:70%; }
.buttong { border-radius:0.5em;background:#C20000;padding:0.3em 0.3em;width:40%;color:white;font-size:130%; }
</style>
</head>
<body>
<h2>ESP Web Server</h2>
<p><span id="textSliderValue">%SLIDERVALUE%</span></p>
<p><input type="range" onchange="updateSliderPWM(this)" id="pwmSlider" min="0" max="21" value="%SLIDERVALUE%" step="1" class="slider"></p>
<a href='/setup'><button class='button'>SETUP</button></a>
<script>
function updateSliderPWM(element) {
var sliderValue = document.getElementById("pwmSlider").value;
document.getElementById("textSliderValue").innerHTML = sliderValue;
console.log(sliderValue);
var xhr = new XMLHttpRequest();
xhr.open("GET", "/slider?value="+sliderValue, true);
xhr.send();
}
</script>
</body>
</html>
)rawliteral";
#endif
const char index_html[] = R"rawliteral(
<!DOCTYPE HTML>
<html>
<head>
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>ESP Web Server</title>
<style>
html {font-family: Arial; display: inline-block; text-align: center;}
h2 {font-size: 2.3rem;}
p {font-size: 1.9rem;}
body {max-width: 400px; margin:0px auto; padding-bottom: 25px;}
.slider { width: 360px; }
.slider::-webkit-slider-thumb { width: 50px; height: 50px; }
.slider::-moz-range-thumb { width: 50px; height: 50px; }
button { border-radius:0.5em;background:#C20000;padding:0.3em 0.3em;width:40%;color:white;font-size:130%; }
.buttons { border-radius:0.5em;background:#C20000;padding:0.3em 0.3em;width:15%;color:white;font-size:80%; }
.buttonx { border-radius:0.5em;background:#C20000;padding:0.3em 0.3em;width:9%; color:white;font-size:70%; }
.buttonm { border-radius:0.5em;background:#C20000;padding:0.3em 0.3em;width:15%;color:white;font-size:70%; }
.buttonw { border-radius:0.5em;background:#C20000;padding:0.3em 0.3em;width:40%;color:white;font-size:70%; }
.buttong { border-radius:0.5em;background:#C20000;padding:0.3em 0.3em;width:40%;color:white;font-size:130%; }
</style>
</head>
<body>
<h2>ESP Web Server</h2>
<p><span id="textSliderValue">%SLIDERVALUE%</span></p>
<p><input type="range" onchange="updateSliderPWM(this)" id="pwmSlider" min="0" max="21" value="%SLIDERVALUE%" step="1" class="slider"></p>
<a href='/setup'><button class='button'>SETUP</button></a>
<script>
function updateSliderPWM(element) {
var sliderValue = document.getElementById("pwmSlider").value;
document.getElementById("textSliderValue").innerHTML = sliderValue;
console.log(sliderValue);
var xhr = new XMLHttpRequest();
xhr.open("GET", "/slider?value="+sliderValue, true);
xhr.send();
}
</script>
</body>
</html>
)rawliteral";
// Replaces placeholder with button section in your web page
String processor(const String& var)
{
//Serial.println(var);
if (var == "SLIDERVALUE"){
return sliderValue;
}
return String();
}
#include <Preferences.h>
Preferences Pref;
int32_t g_iVolume = 0;
void setup()
{
// Serial port for debugging purposes
Serial.begin(115200);
Pref.begin("datasetup", false);
g_iVolume = Pref.getInt("volume", 5);
Serial.print("volume="); Serial.println(g_iVolume);
sliderValue = String(g_iVolume);
Pref.end();
// Connect to Wi-Fi
WiFi.begin(ssid, pswd);
Serial.println("Connecting ...");
while (WiFi.status() != WL_CONNECTED)
{ // Wait for the Wi-Fi to connect: scan for Wi-Fi networks, and connect to the strongest of the networks above
delay(250);
Serial.print('.');
}
// Print ESP Local IP Address
Serial.println(WiFi.localIP());
// Route for root / web page
server.on("/", HTTP_GET, [](AsyncWebServerRequest *request){
request->send_P(200, "text/html", index_html, processor);
});
// Send a GET request to <ESP_IP>/slider?value=<inputMessage>
server.on("/slider", HTTP_GET, [] (AsyncWebServerRequest *request) {
String inputMessage;
// GET input1 value on <ESP_IP>/slider?value=<inputMessage>
if (request->hasParam(PARAM_INPUT)) {
inputMessage = request->getParam(PARAM_INPUT)->value();
sliderValue = inputMessage;
int ival = sliderValue.toInt();
Serial.print("ival="); Serial.println(ival);
Pref.begin("datasetup", false);
size_t st = Pref.putInt("volume", ival);
Pref.end();
Serial.print("st="); Serial.println(st);
Pref.begin("datasetup", false);
int vol = Pref.getInt("volume", -1);
Serial.print("volume="); Serial.println(vol);
Pref.end();
}
else {
inputMessage = "No message sent";
}
Serial.println(inputMessage);
request->send(200, "text/plain", "OK");
});
// Start server
server.begin();
}
//------------------------------------------------------------------------
void loop()
{
// put your main code here, to run repeatedly:
}
//------------------------------------------------------------------------
the 1st image is with nullptr instead of processor and the 2nd with processor callback.
A:
I found the problem. The author of ESPAsyncWebServer decided to use the % character as the delimiter for placeholders for the template processor. Unfortunately, % is quite common in CSS and JavaScript, so writing CSS and JavaScript in HTML file(s) or strings does not work as expected because of the wrong interpretation of % as delimiters of chunks of text that are not placeholders but CSS or JavaScript code. At the moment there is no workaround; just do not use % in CSS and JavaScript.
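To make the collision concrete: in the page above, the %SLIDERVALUE% token and CSS values such as width:40% both contain pairs of %, so the template engine cannot tell them apart. A minimal sketch of the "avoid % in CSS/JS" workaround described above — the px/em sizes below are placeholders, not a drop-in replacement for the original styling:
// Sketch: only the real placeholder keeps %; CSS percentages are rewritten in px/em.
const char index_html[] = R"rawliteral(
<!DOCTYPE HTML><html>
<head><style>
  body    { max-width: 400px; }
  .slider { width: 360px; }   /* fixed width instead of a %-based one */
  button  { width: 10em; }    /* was width:40% */
</style></head>
<body>
  <p><span id="textSliderValue">%SLIDERVALUE%</span></p>
</body>
</html>
)rawliteral";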
 | ESPAsyncWebServer request->send_P problem | I am doing a simple example using ESPAsyncWebServer on ESP32. In this context I wrote an HTML file (there are a slider and a button) and tested it in a browser until the content looked right. Then I integrated it into the C++ source code for the ESP32. It works and looks as expected until I add a callback (the template processor) to read the value of the slider.
This is the complete source code:
#include <Arduino.h>
#include <WiFi.h>
#include <AsyncTCP.h>
#include <ESPAsyncWebServer.h>
// Replace with your network credentials
const char ssid[] = "Vodafone-A40881218";
const char pswd[] = "rJbFMktHCcqN67Ye";
const int output = 2;
String sliderValue = "0";
// setting PWM properties
const int freq = 5000;
const int ledChannel = 0;
const int resolution = 8;
const char* PARAM_INPUT = "value";
// Create AsyncWebServer object on port 80
AsyncWebServer server(80);
#if 0
const char old_index_html[] PROGMEM = R"rawliteral(
<!DOCTYPE HTML><html>
<head>
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>ESP Web Server</title>
<style>
html {font-family: Arial; display: inline-block; text-align: center;}
h2 {font-size: 2.3rem;}
p {font-size: 1.9rem;}
body {max-width: 400px; margin:0px auto; padding-bottom: 25px;}
.slider { -webkit-appearance: none; margin: 14px; width: 360px; height: 25px; background: #FFD65C; outline: none; -webkit-transition: .2s; transition: opacity .2s;}
.slider::-webkit-slider-thumb {-webkit-appearance: none; appearance: none; width: 35px; height: 35px; background: #003249; cursor: pointer;}
.slider::-moz-range-thumb { width: 35px; height: 35px; background: #003249; cursor: pointer; }
button { border-radius:0.5em;background:#C20000;padding:0.3em 0.3em;width:20%;color:white;font-size:130%; }
.buttons { border-radius:0.5em;background:#C20000;padding:0.3em 0.3em;width:15%;color:white;font-size:80%; }
.buttonsm { border-radius:0.5em;background:#C20000;padding:0.3em 0.3em;width:9%; color:white;font-size:70%; }
.buttonm { border-radius:0.5em;background:#C20000;padding:0.3em 0.3em;width:15%;color:white;font-size:70%; }
.buttonw { border-radius:0.5em;background:#C20000;padding:0.3em 0.3em;width:40%;color:white;font-size:70%; }
.buttong { border-radius:0.5em;background:#C20000;padding:0.3em 0.3em;width:40%;color:white;font-size:130%; }
</style>
</head>
<body>
<h2>ESP Web Server</h2>
<p><span id="textSliderValue">%SLIDERVALUE%</span></p>
<p><input type="range" onchange="updateSliderPWM(this)" id="pwmSlider" min="0" max="21" value="%SLIDERVALUE%" step="1" class="slider"></p>
<a href='/setup'><button class='button'>SETUP</button></a>
<script>
function updateSliderPWM(element) {
var sliderValue = document.getElementById("pwmSlider").value;
document.getElementById("textSliderValue").innerHTML = sliderValue;
console.log(sliderValue);
var xhr = new XMLHttpRequest();
xhr.open("GET", "/slider?value="+sliderValue, true);
xhr.send();
}
</script>
</body>
</html>
)rawliteral";
#endif
const char index_html[] = R"rawliteral(
<!DOCTYPE HTML>
<html>
<head>
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>ESP Web Server</title>
<style>
html {font-family: Arial; display: inline-block; text-align: center;}
h2 {font-size: 2.3rem;}
p {font-size: 1.9rem;}
body {max-width: 400px; margin:0px auto; padding-bottom: 25px;}
.slider { width: 360px; }
.slider::-webkit-slider-thumb { width: 50px; height: 50px; }
.slider::-moz-range-thumb { width: 50px; height: 50px; }
button { border-radius:0.5em;background:#C20000;padding:0.3em 0.3em;width:40%;color:white;font-size:130%; }
.buttons { border-radius:0.5em;background:#C20000;padding:0.3em 0.3em;width:15%;color:white;font-size:80%; }
.buttonx { border-radius:0.5em;background:#C20000;padding:0.3em 0.3em;width:9%; color:white;font-size:70%; }
.buttonm { border-radius:0.5em;background:#C20000;padding:0.3em 0.3em;width:15%;color:white;font-size:70%; }
.buttonw { border-radius:0.5em;background:#C20000;padding:0.3em 0.3em;width:40%;color:white;font-size:70%; }
.buttong { border-radius:0.5em;background:#C20000;padding:0.3em 0.3em;width:40%;color:white;font-size:130%; }
</style>
</head>
<body>
<h2>ESP Web Server</h2>
<p><span id="textSliderValue">%SLIDERVALUE%</span></p>
<p><input type="range" onchange="updateSliderPWM(this)" id="pwmSlider" min="0" max="21" value="%SLIDERVALUE%" step="1" class="slider"></p>
<a href='/setup'><button class='button'>SETUP</button></a>
<script>
function updateSliderPWM(element) {
var sliderValue = document.getElementById("pwmSlider").value;
document.getElementById("textSliderValue").innerHTML = sliderValue;
console.log(sliderValue);
var xhr = new XMLHttpRequest();
xhr.open("GET", "/slider?value="+sliderValue, true);
xhr.send();
}
</script>
</body>
</html>
)rawliteral";
// Replaces placeholder with button section in your web page
String processor(const String& var)
{
//Serial.println(var);
if (var == "SLIDERVALUE"){
return sliderValue;
}
return String();
}
#include <Preferences.h>
Preferences Pref;
int32_t g_iVolume = 0;
void setup()
{
// Serial port for debugging purposes
Serial.begin(115200);
Pref.begin("datasetup", false);
g_iVolume = Pref.getInt("volume", 5);
Serial.print("volume="); Serial.println(g_iVolume);
sliderValue = String(g_iVolume);
Pref.end();
// Connect to Wi-Fi
WiFi.begin(ssid, pswd);
Serial.println("Connecting ...");
while (WiFi.status() != WL_CONNECTED)
{ // Wait for the Wi-Fi to connect: scan for Wi-Fi networks, and connect to the strongest of the networks above
delay(250);
Serial.print('.');
}
// Print ESP Local IP Address
Serial.println(WiFi.localIP());
// Route for root / web page
server.on("/", HTTP_GET, [](AsyncWebServerRequest *request){
request->send_P(200, "text/html", index_html, processor);
});
// Send a GET request to <ESP_IP>/slider?value=<inputMessage>
server.on("/slider", HTTP_GET, [] (AsyncWebServerRequest *request) {
String inputMessage;
// GET input1 value on <ESP_IP>/slider?value=<inputMessage>
if (request->hasParam(PARAM_INPUT)) {
inputMessage = request->getParam(PARAM_INPUT)->value();
sliderValue = inputMessage;
int ival = sliderValue.toInt();
Serial.print("ival="); Serial.println(ival);
Pref.begin("datasetup", false);
size_t st = Pref.putInt("volume", ival);
Pref.end();
Serial.print("st="); Serial.println(st);
Pref.begin("datasetup", false);
int vol = Pref.getInt("volume", -1);
Serial.print("volume="); Serial.println(vol);
Pref.end();
}
else {
inputMessage = "No message sent";
}
Serial.println(inputMessage);
request->send(200, "text/plain", "OK");
});
// Start server
server.begin();
}
//------------------------------------------------------------------------
void loop()
{
// put your main code here, to run repeatedly:
}
//------------------------------------------------------------------------
the 1st image is with nullptr instead of processor and the 2nd with processor callback.
| [
"I found the problem. The author of ESPAsyncWebServer decided to use the % character as the delimiter for placeholders for the template processor. Unfortunately, the % is quite common in CSS and JavaScript so, writing CSS and JavaScript in HTML file(s) or Strings does not work as expected because the wrong interpretation of % as delimiters of chuncks of text that are not placeholders but CSS or JavaScript code. At the moment there is not a workaround, just do not use % in CSS and JavaScript.\n"
] | [
0
] | [] | [] | [
"esp32"
] | stackoverflow_0074649351_esp32.txt |
Q:
google chrome console, print image
About a year ago I created a plugin to enhance console logs; the main idea was to print images in the console, so for example you could add some icons or glyphs.
It was working pretty nicely, and I saw that there are many of those available online right now. The problem is that none of them are working at the moment.
I noticed it after the last Chrome update, I think. Currently I have version 49.0.2623.112.
All of those plugins, including mine, work in the same way:
console.log("%c" + dim.string, dim.style + "background: url(" + url + "); background-size: " + (this.width * scale) + "px " + (this.height * scale) + "px; color: transparent;");
For example this one: plugin link on github
Does anyone know how we can print images in the console in newer versions of Chrome?
A:
Try a code example with console F12:
console.log('%c ', 'font-size:400px; background:url(https://pics.me.me/codeit-google-until-youfinda-stackoverflow-answerwith-code-to-copy-paste-34126823.png) no-repeat;');
A:
I've been searching for a while for one that can print out the whole image without cutting it, and make it resizeable, and I came up with basically this:
console.image = function(url, size = 100) {
var image = new Image();
image.onload = function() {
var style = [
'font-size: 1px;',
'padding: ' + this.height/100*size + 'px ' + this.width/100*size + 'px;',
'background: url('+ url +') no-repeat;',
'background-size: contain;'
].join(' ');
console.log('%c ', style);
};
image.src = url;
};
and then just use console.image(URL[, size]); to print out the image.
The URL needs to be a valid URL and the size is basically percentage, with 100 being the default value. It can get shrunk down if the value is lower than 100, and expanded if the value is higher than 100.
A:
It appears that Chrome removed support for background images in the console in Chrome 101 and Edge 101.
I don't believe it has ever worked in Firefox (at least it does not in Firefox 100).
By contrast, it still works in Safari 15.4.
I have been trying to find an article that explains why this functionality was removed from Chrome/Edge but so far haven't come across any.
A:
I ran into your console.image GitHub repository as a matter of fact while looking into the same issue. Although the post is quite old, I learned from the horse's mouth that it works in Chrome Canary. In fact, I tried your plugin demo in Canary and was able to see the spinning chicken. I still haven't found out why it suddenly stopped working in Chrome. The feature still works in Firebug for Firefox. The console.log() documentation for Chrome on this only showcases text-based styling.
I found one SO example where they load the image first and then apply the styling using console.log("%c....", "...");. Unfortunately, that still didn't work in "standard" Chrome.
So, short answer, it looks like Canary for now supports images in the console.
A:
From a Web Worker, document, Image, and Canvas are not available. If the image is an ImageData, it can be printed to the console using OffscreenCanvas:
function debugImage(imageData) {
try {
const c = new OffscreenCanvas(imageData.width, imageData.height);
const ctx = c.getContext("2d");
if (ctx) {
ctx.putImageData(imageData, 0, 0);
c.convertToBlob().then((blob) => {
const dataUri = new FileReaderSync().readAsDataURL(blob);
const style = `font-size: 300px; background-image: url("${dataUri}"); background-size: contain; background-repeat: no-repeat;`;
console.log("%c ", style);
});
}
} catch (e) {
console.error(e);
}
}
Tested with Chrome 108.
 | google chrome console, print image | About a year ago I created a plugin to enhance console logs; the main idea was to print images in the console, so for example you could add some icons or glyphs.
It was working pretty nicely, and I saw that there are many of those available online right now. The problem is that none of them are working at the moment.
I noticed it after the last Chrome update, I think. Currently I have version 49.0.2623.112.
All of those plugins, including mine, work in the same way:
console.log("%c" + dim.string, dim.style + "background: url(" + url + "); background-size: " + (this.width * scale) + "px " + (this.height * scale) + "px; color: transparent;");
For example this one: plugin link on github
Does anyone know how we can print images in the console in newer versions of Chrome?
| [
"Try a code example with console F12:\n\n\nconsole.log('%c ', 'font-size:400px; background:url(https://pics.me.me/codeit-google-until-youfinda-stackoverflow-answerwith-code-to-copy-paste-34126823.png) no-repeat;');\n\n\n\n",
"I've been searching for a while for one that can print out the whole image without cutting it, and make it resizeable, and I came up with basically this:\nconsole.image = function(url, size = 100) {\n var image = new Image();\n image.onload = function() {\n var style = [\n 'font-size: 1px;',\n 'padding: ' + this.height/100*size + 'px ' + this.width/100*size + 'px;',\n 'background: url('+ url +') no-repeat;',\n 'background-size: contain;'\n ].join(' ');\n console.log('%c ', style);\n };\n image.src = url;\n};\n\nand then just use console.image(URL[, size]); to print out the image.\nThe URL needs to be a valid URL and the size is basically percentage, with 100 being the default value. It can get shrunk down if the value is lower than 100, and expanded if the value is higher than 100.\n",
"It appears that Chrome removed support for background images in the console in Chrome 101 and Edge 101.\nI don't believe it has ever worked in Firefox (at least it does not in Firefox 100).\nBy contrast, it still works in Safari 15.4.\nI have been trying to find an article that explains why this functionality was removed from Chrome/Edge but so far haven't come across any.\n",
"I ran into your console.image GitHub repository as a matter of fact while looking into the same issue. Although the post is quite old, I learned from the horse's mouth that it works in Chrome Canary. In fact, I tried your plugin demo in Canary and was able to see the spinning chicken. I still haven't found out why it suddenly stopped working in Chrome. The feature still works in Firebug for Firefox. The console.log() documentation for Chrome on this only showcases text-based styling.\nI found one SO example where they load the image first and then apply the styling using console.log(\"%c....\", \"...\");. Unfortunately, that still didn't work in \"standard\" Chrome.\nSo, short answer, it looks like Canary for now supports images in the console.\n",
"From a Web Worker, document, Image, and Canvas are not available. If the image is an ImageData, it can be printed to the console using OffscreenCanvas:\nfunction debugImage(imageData) {\n try {\n const c = new OffscreenCanvas(imageData.width, imageData.height);\n const ctx = c.getContext(\"2d\");\n if (ctx) {\n ctx.putImageData(imageData, 0, 0);\n c.convertToBlob().then((blob) => {\n const dataUri = new FileReaderSync().readAsDataURL(blob);\n const style = `font-size: 300px; background-image: url(\"${dataUri}\"); background-size: contain; background-repeat: no-repeat;`;\n console.log(\"%c \", style);\n });\n }\n } catch (e) {\n console.error(e);\n }\n}\n\nTested with Chrome 108.\n"
] | [
54,
8,
8,
1,
0
] | [] | [] | [
"google_chrome",
"javascript"
] | stackoverflow_0036885562_google_chrome_javascript.txt |
Q:
Can't access Excel file from Python and get error No such file or directory?
I work with SQL Server 2019 and Python 3.10.
When I try to read an Excel file with OPENROWSET using this statement:
SELECT *
FROM OPENROWSET('Microsoft.ACE.OLEDB.12.0', 'Excel 12.0 Xml;Database=\\192.168.7.9\\Import\6\strtinsertinput (4)-953aee07-ca14-4213-a91e-ab0b0f7f3db2.xlsx;HDR=YES','select * FROM [Sheet1$]')
It reads an Excel file successfully,
But when I try to read it using Python from SQL query
EXECUTE sp_execute_external_script
@language = N'Python',
@script = N'import pandas as pd
df = pd.read_excel("\\192.168.7.9\\Import\6\strtinsertinput (4)-953aee07-ca14-4213-a91e-ab0b0f7f3db2.xlsx", sheet_name = "Sheet1")';
GO
I get this error:
Error in execution. Check the output for more information.
Traceback (most recent call last):
File "", line 5, in
File "C:\ProgramData\MSSQLSERVER\Temp-PY\Appcontainer1\46CB4A4F-004A-4329-A390-FEF283444F33\sqlindb_0.py", line 31, in transform
df = pd.read_excel("\192.168.7.9\Import\6\strtinsertinput (4)-953aee07-ca14-4213-a91e-ab0b0f7f3db2.xlsx", sheet_name = "Sheet1")
File "C:\Program Files\Microsoft SQL Server\MSSQL15.MSSQLSERVER\PYTHON_SERVICES\lib\site-packages\pandas\util_decorators.py", line 178, in wrapper
return func(*args, **kwargs)
File "C:\Program Files\Microsoft SQL Server\MSSQL15.MSSQLSERVER\PYTHON_SERVICES\lib\site-packages\pandas\util_decorators.py", line 178, in wrapper
return func(*args, **kwargs)
File "C:\Program Files\Microsoft SQL Server\MSSQL15.MSSQLSERVER\PYTHON_SERVICES\lib\site-packages\pandas\io\excel.py", line 307, in read_excel
io = ExcelFile(io, engine=engine)
Msg 39019, Level 16, State 2, Line 0
An external script error occurred:
File "C:\Program Files\Microsoft SQL Server\MSSQL15.MSSQLSERVER\PYTHON_SERVICES\lib\site-packages\pandas\io\excel.py", line 394, in init
self.book = xlrd.open_workbook(self.io)
File "C:\Program Files\Microsoft SQL Server\MSSQL15.MSSQLSERVER\PYTHON_SERVICES\lib\site-packages\xlrd_init.py", line 111, in open_workbook
with open(filename, "rb") as f:
FileNotFoundError: [Errno 2] No such file or directory: '\192.168.7.9\Import\x06\strtinsertinput (4)-953aee07-ca14-4213-a91e-ab0b0f7f3db2.xlsx'
How to solve this issue?
A:
I recommend switching pandas to the openpyxl engine for reading operations.
df = pd.read_excel(file_path, engine='openpyxl')
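A hedged sketch of the full call: besides the openpyxl engine, the UNC path is written as a raw string, because the traceback above shows the \6 segment being collapsed into the control character \x06. The share path itself is copied from the question and may need adjusting for your environment:
import pandas as pd

# Raw string keeps the backslashes literal, so "\6" is no longer read as an escape sequence.
path = r"\\192.168.7.9\Import\6\strtinsertinput (4)-953aee07-ca14-4213-a91e-ab0b0f7f3db2.xlsx"
df = pd.read_excel(path, sheet_name="Sheet1", engine="openpyxl")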
| Can't access Excel file from Python and get error No such file or directory? | I work with SQL Server 2019 and Python 3.10.
When I try to read an Excel file with OPENROWSET using this statement:
SELECT *
FROM OPENROWSET('Microsoft.ACE.OLEDB.12.0', 'Excel 12.0 Xml;Database=\\192.168.7.9\\Import\6\strtinsertinput (4)-953aee07-ca14-4213-a91e-ab0b0f7f3db2.xlsx;HDR=YES','select * FROM [Sheet1$]')
It reads an Excel file successfully,
But when I try to read it using Python from SQL query
EXECUTE sp_execute_external_script
@language = N'Python',
@script = N'import pandas as pd
df = pd.read_excel("\\192.168.7.9\\Import\6\strtinsertinput (4)-953aee07-ca14-4213-a91e-ab0b0f7f3db2.xlsx", sheet_name = "Sheet1")';
GO
I get this error:
Error in execution. Check the output for more information.
Traceback (most recent call last):
File "", line 5, in
File "C:\ProgramData\MSSQLSERVER\Temp-PY\Appcontainer1\46CB4A4F-004A-4329-A390-FEF283444F33\sqlindb_0.py", line 31, in transform
df = pd.read_excel("\192.168.7.9\Import\6\strtinsertinput (4)-953aee07-ca14-4213-a91e-ab0b0f7f3db2.xlsx", sheet_name = "Sheet1")
File "C:\Program Files\Microsoft SQL Server\MSSQL15.MSSQLSERVER\PYTHON_SERVICES\lib\site-packages\pandas\util_decorators.py", line 178, in wrapper
return func(*args, **kwargs)
File "C:\Program Files\Microsoft SQL Server\MSSQL15.MSSQLSERVER\PYTHON_SERVICES\lib\site-packages\pandas\util_decorators.py", line 178, in wrapper
return func(*args, **kwargs)
File "C:\Program Files\Microsoft SQL Server\MSSQL15.MSSQLSERVER\PYTHON_SERVICES\lib\site-packages\pandas\io\excel.py", line 307, in read_excel
io = ExcelFile(io, engine=engine)
Msg 39019, Level 16, State 2, Line 0
An external script error occurred:
File "C:\Program Files\Microsoft SQL Server\MSSQL15.MSSQLSERVER\PYTHON_SERVICES\lib\site-packages\pandas\io\excel.py", line 394, in init
self.book = xlrd.open_workbook(self.io)
File "C:\Program Files\Microsoft SQL Server\MSSQL15.MSSQLSERVER\PYTHON_SERVICES\lib\site-packages\xlrd_init.py", line 111, in open_workbook
with open(filename, "rb") as f:
FileNotFoundError: [Errno 2] No such file or directory: '\192.168.7.9\Import\x06\strtinsertinput (4)-953aee07-ca14-4213-a91e-ab0b0f7f3db2.xlsx'
How to solve this issue?
| [
"I recommend changing pandas to openpyxl for reading operations.\ndf = pd.read_excel(file_path, engine='openpyxl')\n"
] | [
0
] | [] | [] | [
"openrowset",
"pandas",
"python_3.x",
"sql_server_2019",
"tsql"
] | stackoverflow_0072768205_openrowset_pandas_python_3.x_sql_server_2019_tsql.txt |
Q:
How to whitelist Apple and Google to access deep link association files?
We have implemented Universal Links for iOS and Android App Links.
We have deployed the following files as per the documentation:
/.well-known/apple-app-site-association
/.well-known/assetlinks.json
Everything works fine until my organization applied domain-level block on traffic out side my country.
We need to whitelist Apple and Google servers but I cannot find their IPs or domain used to access those files.
A:
Universal Links and Android App Links are both technologies that allow developers to link to content in their mobile apps from web pages and other apps. These technologies use specific files that are deployed on the developer's website to enable this functionality.
The files you mention, apple-app-site-association and assetlinks.json, are the files used by these technologies to enable linking to content in your app from other apps and web pages. These files must be deployed on your website at the /.well-known path, as you have done, in order for Universal Links and Android App Links to work properly.
When someone tries to open a link to content in your app, their device will connect to your website and download these files to verify that the link is allowed and to determine how to handle the link. This is why it is important to whitelist the servers used by Apple and Google to access these files on your website.
To whitelist the servers used by Apple and Google to access these files, you will need to know the IP addresses or domain names used by these servers. Unfortunately, I do not have access to the internet and cannot provide the specific IP addresses or domain names used by these servers.
However, you can find this information by doing the following:
Use a network monitor or packet sniffer to capture the network traffic to your website when someone tries to open a link to your app. This will allow you to see the IP addresses or domain names used by the servers that are accessing your apple-app-site-association and assetlinks.json files.
Contact Apple and Google and ask them for the IP addresses or domain names used by their servers to access these files on your website. They should be able to provide this information to you.
Once you have the IP addresses or domain names used by the Apple and Google servers, you can whitelist them on your organization's domain-level block to allow traffic to and from these servers. This should enable Universal Links and Android App Links to work properly for your app.
It is worth noting that the IP addresses or domain names used by these servers may change over time, so you may need to update your whitelist periodically to ensure that it remains up to date. Also, whitelisting these servers may have security implications, so be sure to carefully consider the risks and benefits before implementing this change.
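A lower-effort variant of the traffic-capture idea above: if you can read your web server's access logs, you can list the client addresses that actually request the association files. This is only a hedged sketch — the log path and format are assumptions for a typical nginx/Apache setup:
# List unique client IPs that fetched the deep-link association files
grep -E "/\.well-known/(apple-app-site-association|assetlinks\.json)" /var/log/nginx/access.log \
  | awk '{print $1}' | sort -u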
| How to whitelist Apple and Google to access deep link association files? | We have implemented Universal Links for iOS and Android App Links.
We have deployed the following files as per the documentation:
/.well-known/apple-app-site-association
/.well-known/assetlinks.json
Everything works fine until my organization applied domain-level block on traffic out side my country.
We need to whitelist Apple and Google servers but I cannot find their IPs or domain used to access those files.
| [
"Universal Links and Android App Links are both technologies that allow developers to link to content in their mobile apps from web pages and other apps. These technologies use specific files that are deployed on the developer's website to enable this functionality.\nThe files you mention, apple-app-site-association and assetlinks.json, are the files used by these technologies to enable linking to content in your app from other apps and web pages. These files must be deployed on your website at the /.well-known path, as you have done, in order for Universal Links and Android App Links to work properly.\nWhen someone tries to open a link to content in your app, their device will connect to your website and download these files to verify that the link is allowed and to determine how to handle the link. This is why it is important to whitelist the servers used by Apple and Google to access these files on your website.\nTo whitelist the servers used by Apple and Google to access these files, you will need to know the IP addresses or domain names used by these servers. Unfortunately, I do not have access to the internet and cannot provide the specific IP addresses or domain names used by these servers.\nHowever, you can find this information by doing the following:\n\nUse a network monitor or packet sniffer to capture the network traffic to your website when someone tries to open a link to your app. This will allow you to see the IP addresses or domain names used by the servers that are accessing your apple-app-site-association and assetlinks.json files.\n\nContact Apple and Google and ask them for the IP addresses or domain names used by their servers to access these files on your website. They should be able to provide this information to you.\n\n\nOnce you have the IP addresses or domain names used by the Apple and Google servers, you can whitelist them on your organization's domain-level block to allow traffic to and from these servers. This should enable Universal Links and Android App Links to work properly for your app.\nIt is worth noting that the IP addresses or domain names used by these servers may change over time, so you may need to update your whitelist periodically to ensure that it remains up to date. Also, whitelisting these servers may have security implications, so be sure to carefully consider the risks and benefits before implementing this change.\n"
] | [
0
] | [] | [] | [
"android_app_links",
"ios_universal_links"
] | stackoverflow_0074614632_android_app_links_ios_universal_links.txt |
Q:
Deleting an element from an array in PHP
Is there an easy way to delete an element from an array using PHP, such that foreach ($array) no longer includes that element?
I thought that setting it to null would do it, but apparently it does not work.
A:
There are different ways to delete an array element, where some are more useful for some specific tasks than others.
Deleting a single array element
If you want to delete just one array element you can use unset() or alternatively \array_splice().
If you know the value and don’t know the key to delete the element you can use \array_search() to get the key. Note that this removes only the first occurrence of the value, since \array_search() returns the first hit only.
unset()
Note that when you use unset() the array keys won’t change. If you want to reindex the keys you can use \array_values() after unset(), which will convert all keys to numerically enumerated keys starting from 0.
Code:
$array = [0 => "a", 1 => "b", 2 => "c"];
unset($array[1]);
// ↑ Key which you want to delete
Output:
[
[0] => a
[2] => c
]
\array_splice() method
If you use \array_splice() the keys will automatically be reindexed, but the associative keys won’t change — as opposed to \array_values(), which will convert all keys to numerical keys.
\array_splice() needs the offset, not the key, as the second parameter.
Code:
$array = [0 => "a", 1 => "b", 2 => "c"];
\array_splice($array, 1, 1);
// ↑ Offset which you want to delete
Output:
[
[0] => a
[1] => c
]
array_splice(), like unset(), takes the array by reference. You don’t assign the return values of those functions back to the array.
Deleting multiple array elements
If you want to delete multiple array elements and don’t want to call unset() or \array_splice() multiple times you can use the functions \array_diff() or \array_diff_key() depending on whether you know the values or the keys of the elements which you want to delete.
\array_diff() method
If you know the values of the array elements which you want to delete, then you can use \array_diff(). As before with unset() it won’t change the keys of the array.
Code:
$array = [0 => "a", 1 => "b", 2 => "c", 3 => "c"];
$array = \array_diff($array, ["a", "c"]);
// └────────┘
// Array values which you want to delete
Output:
[
[1] => b
]
\array_diff_key() method
If you know the keys of the elements which you want to delete, then you want to use \array_diff_key(). You have to make sure you pass the keys as keys in the second parameter and not as values. Keys won’t reindex.
Code:
$array = [0 => "a", 1 => "b", 2 => "c"];
$array = \array_diff_key($array, [0 => "xy", "2" => "xy"]);
// ↑ ↑
// Array keys which you want to delete
Output:
[
[1] => b
]
If you want to use unset() or \array_splice() to delete multiple elements with the same value you can use \array_keys() to get all the keys for a specific value and then delete all elements.
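For example, a small sketch of that approach — collect every key holding the value and unset them all:
$array = [0 => "a", 1 => "b", 2 => "c", 3 => "c"];

// \array_keys() with a search value returns all keys that hold "c"
foreach (\array_keys($array, "c", true) as $key) {
    unset($array[$key]);
}

// $array is now [0 => "a", 1 => "b"]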
\array_filter() method
If you want to delete all elements with a specific value in the array you can use \array_filter().
Code:
$array = [0 => "a", 1 => "b", 2 => "c"];
$array = \array_filter($array, static function ($element) {
return $element !== "b";
// ↑
// Array value which you want to delete
});
Output:
[
[0] => a
[1] => c
]
A:
It should be noted that unset() will keep indexes untouched, which is what you'd expect when using string indexes (array as hashtable), but can be quite surprising when dealing with integer indexed arrays:
$array = array(0, 1, 2, 3);
unset($array[2]);
var_dump($array);
/* array(3) {
[0]=>
int(0)
[1]=>
int(1)
[3]=>
int(3)
} */
$array = array(0, 1, 2, 3);
array_splice($array, 2, 1);
var_dump($array);
/* array(3) {
[0]=>
int(0)
[1]=>
int(1)
[2]=>
int(3)
} */
So array_splice() can be used if you'd like to normalize your integer keys. Another option is using array_values() after unset():
$array = array(0, 1, 2, 3);
unset($array[2]);
$array = array_values($array);
var_dump($array);
/* array(3) {
[0]=>
int(0)
[1]=>
int(1)
[2]=>
int(3)
} */
A:
// Our initial array
$arr = array("blue", "green", "red", "yellow", "green", "orange", "yellow", "indigo", "red");
print_r($arr);
// Remove the elements who's values are yellow or red
$arr = array_diff($arr, array("yellow", "red"));
print_r($arr);
This is the output from the code above:
Array
(
[0] => blue
[1] => green
[2] => red
[3] => yellow
[4] => green
[5] => orange
[6] => yellow
[7] => indigo
[8] => red
)
Array
(
[0] => blue
[1] => green
[4] => green
[5] => orange
[7] => indigo
)
Now, array_values() will reindex a numerical array nicely, but it will remove all key strings from the array and replace them with numbers. If you need to preserve the key names (strings), or reindex the array if all keys are numerical, use array_merge():
$arr = array_merge(array_diff($arr, array("yellow", "red")));
print_r($arr);
Outputs
Array
(
[0] => blue
[1] => green
[2] => green
[3] => orange
[4] => indigo
)
A:
$key = array_search($needle, $array);
if ($key !== false) {
unset($array[$key]);
}
A:
unset($array[$index]);
A:
Also, for a named element:
unset($array["elementName"]);
A:
If you have a numerically indexed array where all values are unique (or they are non-unique but you wish to remove all instances of a particular value), you can simply use array_diff() to remove a matching element, like this:
$my_array = array_diff($my_array, array('Value_to_remove'));
For example:
$my_array = array('Andy', 'Bertha', 'Charles', 'Diana');
echo sizeof($my_array) . "\n";
$my_array = array_diff($my_array, array('Charles'));
echo sizeof($my_array);
This displays the following:
4
3
In this example, the element with the value 'Charles' is removed as can be verified by the sizeof() calls that report a size of 4 for the initial array, and 3 after the removal.
A:
Destroy a single element of an array
unset()
$array1 = array('A', 'B', 'C', 'D', 'E');
unset($array1[2]); // Delete known index(2) value from array
var_dump($array1);
The output will be:
array(4) {
[0]=>
string(1) "A"
[1]=>
string(1) "B"
[3]=>
string(1) "D"
[4]=>
string(1) "E"
}
If you need to re index the array:
$array1 = array_values($array1);
var_dump($array1);
Then the output will be:
array(4) {
[0]=>
string(1) "A"
[1]=>
string(1) "B"
[2]=>
string(1) "D"
[3]=>
string(1) "E"
}
Pop the element off the end of array - return the value of the removed element
mixed array_pop(array &$array)
$stack = array("orange", "banana", "apple", "raspberry");
$last_fruit = array_pop($stack);
print_r($stack);
print_r('Last Fruit:'.$last_fruit); // Last element of the array
The output will be
Array
(
[0] => orange
[1] => banana
[2] => apple
)
Last Fruit: raspberry
Remove the first element (red) from an array, - return the value of the removed element
mixed array_shift ( array &$array )
$color = array("a" => "red", "b" => "green" , "c" => "blue");
$first_color = array_shift($color);
print_r ($color);
print_r ('First Color: '.$first_color);
The output will be:
Array
(
[b] => green
[c] => blue
)
First Color: red
A:
<?php
$stack = ["fruit1", "fruit2", "fruit3", "fruit4"];
$fruit = array_shift($stack);
print_r($stack);
echo $fruit;
?>
Output:
[
[0] => fruit2
[1] => fruit3
[2] => fruit4
]
fruit1
A:
If the index is specified:
$arr = ['a', 'b', 'c'];
$index = 0;
unset($arr[$index]); // $arr = ['b', 'c']
If we have value instead of index:
$arr = ['a', 'b', 'c'];
// search the value to find index
// Notice! this will only find the first occurrence of value
$index = array_search('a', $arr);
if($index !== false){
unset($arr[$index]); // $arr = ['b', 'c']
}
The if condition is necessary
because if index is not found, unset() will automatically delete
the first element of the array which is not what we want.
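A quick sketch of why the guard matters — a missing value makes array_search() return false, and false is cast to the array key 0:
$arr = ['a', 'b', 'c'];
$index = array_search('z', $arr); // false: 'z' is not in the array
unset($arr[$index]);              // same as unset($arr[0]) — 'a' is removed by mistake
// $arr is now [1 => 'b', 2 => 'c']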
A:
If you have to delete multiple values in an array and the entries in that array are objects or structured data, array_filter() is your best bet. Those entries that return a true from the callback function will be retained.
$array = [
['x'=>1,'y'=>2,'z'=>3],
['x'=>2,'y'=>4,'z'=>6],
['x'=>3,'y'=>6,'z'=>9]
];
$results = array_filter($array, function($value) {
return $value['x'] > 2;
}); //=> [['x'=>3,'y'=>6,'z'=>9]]
A:
Associative arrays
For associative arrays, use unset:
$arr = array('a' => 1, 'b' => 2, 'c' => 3);
unset($arr['b']);
// RESULT: array('a' => 1, 'c' => 3)
Numeric arrays
For numeric arrays, use array_splice:
$arr = array(1, 2, 3);
array_splice($arr, 1, 1);
// RESULT: array(0 => 1, 1 => 3)
Note
Using unset for numeric arrays will not produce an error, but it will mess up your indexes:
$arr = array(1, 2, 3);
unset($arr[1]);
// RESULT: array(0 => 1, 2 => 3)
A:
If you need to remove multiple elements from an associative array, you can use array_diff_key() (here used with array_flip()):
$my_array = array(
"key1" => "value 1",
"key2" => "value 2",
"key3" => "value 3",
"key4" => "value 4",
"key5" => "value 5",
);
$to_remove = array("key2", "key4");
$result = array_diff_key($my_array, array_flip($to_remove));
print_r($result);
Output:
Array ( [key1] => value 1 [key3] => value 3 [key5] => value 5 )
A:
unset() destroys the specified variables.
The behavior of unset() inside of a function can vary depending on what type of variable you are attempting to destroy.
If a globalized variable is unset() inside of a function, only the local variable is destroyed. The variable in the calling environment will retain the same value as before unset() was called.
<?php
function destroy_foo()
{
global $foo;
unset($foo);
}
$foo = 'bar';
destroy_foo();
echo $foo;
?>
The output of the above code will be bar.
To unset() a global variable inside of a function:
<?php
function foo()
{
unset($GLOBALS['bar']);
}
$bar = "something";
foo();
?>
A:
// Remove by value
function removeFromArr($arr, $val)
{
    $key = array_search($val, $arr);
    if ($key !== false) { // guard: array_search() returns false when the value is missing
        unset($arr[$key]);
    }
    return array_values($arr);
}
A:
Solutions:
To delete one element, use unset():
unset($array[3]);
unset($array['foo']);
To delete multiple noncontiguous elements, also use unset():
unset($array[3], $array[5]);
unset($array['foo'], $array['bar']);
To delete multiple contiguous elements, use array_splice():
array_splice($array, $offset, $length);
Further explanation:
Using these functions removes all references to these elements from PHP. If you want to keep a key in the array, but with an empty value, assign the empty string to the element:
$array[3] = $array['foo'] = '';
Besides syntax, there's a logical difference between using unset() and assigning '' to the element. The first says This doesn't exist anymore, while the second says This still exists, but its value is the empty string.
If you're dealing with numbers, assigning 0 may be a better alternative. So, if a company stopped production of the model XL1000 sprocket, it would update its inventory with:
unset($products['XL1000']);
However, if it temporarily ran out of XL1000 sprockets, but was planning to receive a new shipment from the plant later this week, this is better:
$products['XL1000'] = 0;
If you unset() an element, PHP adjusts the array so that looping still works correctly. It doesn't compact the array to fill in the missing holes. This is what we mean when we say that all arrays are associative, even when they appear to be numeric. Here's an example:
// Create a "numeric" array
$animals = array('ant', 'bee', 'cat', 'dog', 'elk', 'fox');
print $animals[1]; // Prints 'bee'
print $animals[2]; // Prints 'cat'
count($animals); // Returns 6
// unset()
unset($animals[1]); // Removes element $animals[1] = 'bee'
print $animals[1]; // Prints '' and throws an E_NOTICE error
print $animals[2]; // Still prints 'cat'
count($animals); // Returns 5, even though $array[5] is 'fox'
// Add a new element
$animals[ ] = 'gnu'; // Add a new element (not Unix)
print $animals[1]; // Prints '', still empty
print $animals[6]; // Prints 'gnu', this is where 'gnu' ended up
count($animals); // Returns 6
// Assign ''
$animals[2] = ''; // Zero out value
print $animals[2]; // Prints ''
count($animals); // Returns 6, count does not decrease
To compact the array into a densely filled numeric array, use array_values():
$animals = array_values($animals);
Alternatively, array_splice() automatically reindexes arrays to avoid leaving holes:
// Create a "numeric" array
$animals = array('ant', 'bee', 'cat', 'dog', 'elk', 'fox');
array_splice($animals, 2, 2);
print_r($animals);
Array
(
[0] => ant
[1] => bee
[2] => elk
[3] => fox
)
This is useful if you're using the array as a queue and want to remove items from the queue while still allowing random access. To safely remove the first or last element from an array, use array_shift() and array_pop(), respectively.
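As a small sketch of those two calls, reusing the $animals list from the example above:
$animals = array('ant', 'bee', 'cat', 'dog', 'elk', 'fox');

$first = array_shift($animals); // 'ant'; the remaining keys are re-indexed from 0
$last  = array_pop($animals);   // 'fox'; no re-indexing is needed for the last element

print_r($animals);
// Output: Array ( [0] => bee [1] => cat [2] => dog [3] => elk )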
A:
Follow the default functions:
PHP: unset
unset() destroys the specified variables. For more info, you can refer to PHP unset
$Array = array("test1", "test2", "test3", "test3");
unset($Array[2]);
PHP: array_pop
The array_pop() function deletes the last element of an array. For more info, you can refer to PHP array_pop
$Array = array("test1", "test2", "test3", "test3");
array_pop($Array);
PHP: array_splice
The array_splice() function removes selected elements from an array and replaces them with new elements. For more info, you can refer to PHP array_splice
$Array = array("test1", "test2", "test3", "test3");
array_splice($Array,1,2);
PHP: array_shift
The array_shift() function removes the first element from an array. For more info, you can refer to PHP array_shift
$Array = array("test1", "test2", "test3", "test3");
array_shift($Array);
A:
I'd just like to say I had a particular object that had variable attributes (it was basically mapping a table and I was changing the columns in the table, so the attributes in the object, reflecting the table would vary as well):
class obj {
protected $fields = array('field1','field2');
protected $field1 = array();
protected $field2 = array();
protected function loadfields(){}
// This will load the $field1 and $field2 with rows of data for the column they describe
protected function clearFields($num){
    foreach($this->fields as $field) {
        // unset($this->$field[$num]); // This did not work; the line below worked
        unset($this->{$field}[$num]); // You have to resolve $field first using {}
}
}
}
The whole purpose of $fields was just so I don't have to look everywhere in the code when the attributes change; I just look at the beginning of the class and update the list of attributes and the $fields array content to reflect the new attributes.
A:
Suppose you have the following array:
Array
(
[user_id] => 193
[storage] => 5
)
To delete storage, do:
unset($attributes['storage']);
$attributes = array_filter($attributes);
And you get:
Array
(
[user_id] => 193
)
A:
Two ways of removing the first item of an array while keeping the order of the index, which also work if you don't know the key name of the first item.
Solution #1
// 1 is the index of the first object to get
// NULL to get everything until the end
// true to preserve keys
$array = array_slice($array, 1, null, true);
Solution #2
// Rewinds the array's internal pointer to the first element
// and returns the value of the first array element.
$value = reset($array);
// Returns the index element of the current array position
$key = key($array);
unset($array[$key]);
For this sample data:
$array = array(10 => "a", 20 => "b", 30 => "c");
You should get this result:
array(2) {
[20]=>
string(1) "b"
[30]=>
string(1) "c"
}
A:
unset() multiple, fragmented elements from an array
While unset() has been mentioned here several times, it has yet to be mentioned that unset() accepts multiple variables making it easy to delete multiple, noncontiguous elements from an array in one operation:
// Delete multiple, noncontiguous elements from an array
$array = [ 'foo', 'bar', 'baz', 'quz' ];
unset( $array[2], $array[3] );
print_r($array);
// Output: [ 'foo', 'bar' ]
unset() dynamically
unset() does not accept an array of keys to remove, so the code below will fail (it would have made it slightly easier to use unset() dynamically though).
$array = range(0,5);
$remove = [1,2];
$array = unset( $remove ); // FAILS: "unexpected 'unset'"
print_r($array);
Instead, unset() can be used dynamically in a foreach loop:
$array = range(0,5);
$remove = [1,2];
foreach ($remove as $k=>$v) {
unset($array[$v]);
}
print_r($array);
// Output: [ 0, 3, 4, 5 ]
Remove array keys by copying the array
There is also another practice that has yet to be mentioned.
Sometimes, the simplest way to get rid of certain array keys is to simply copy $array1 into $array2.
$array1 = range(1,10);
foreach ($array1 as $v) {
// Remove all even integers from the array
if( $v % 2 ) {
$array2[] = $v;
}
}
print_r($array2);
// Output: [ 1, 3, 5, 7, 9 ];
Obviously, the same practice applies to text strings:
$array1 = [ 'foo', '_bar', 'baz' ];
foreach ($array1 as $v) {
// Remove all strings containing an underscore
if( strpos($v,'_')===false ) {
$array2[] = $v;
}
}
print_r($array2);
// Output: [ 'foo', 'baz' ]
A:
<?php
// If you want to remove a particular array element use this method
$my_array = array("key1"=>"value 1", "key2"=>"value 2", "key3"=>"value 3");
print_r($my_array);
if (array_key_exists("key1", $my_array)) {
unset($my_array['key1']);
print_r($my_array);
}
else {
echo "Key does not exist";
}
?>
<?php
//To remove first array element
$my_array = array("key1"=>"value 1", "key2"=>"value 2", "key3"=>"value 3");
print_r($my_array);
$new_array = array_slice($my_array, 1);
print_r($new_array);
?>
<?php
echo "<br/> ";
// To remove the first array element and keep a slice of a given length:
// the slice starts at offset 1 and takes two elements
$my_array = array("key1"=>"value 1", "key2"=>"value 2", "key3"=>"value 3");
print_r($my_array);
$new_array = array_slice($my_array, 1, 2);
print_r($new_array);
?>
Output
Array ( [key1] => value 1 [key2] => value 2 [key3] => value 3 )
Array ( [key2] => value 2 [key3] => value 3 )
Array ( [key1] => value 1 [key2] => value 2 [key3] => value 3 )
Array ( [key2] => value 2 [key3] => value 3 )
Array ( [key1] => value 1 [key2] => value 2 [key3] => value 3 )
Array ( [key2] => value 2 [key3] => value 3 )
A:
Remove an array element based on a key:
Use the unset function like below:
$a = array(
'salam',
'10',
1
);
unset($a[1]);
print_r($a);
/*
Output:
Array
(
[0] => salam
[2] => 1
)
*/
Remove an array element based on value:
Use the array_search function to get an element key and use the above manner to remove an array element like below:
$a = array(
'salam',
'10',
1
);
$key = array_search(10, $a);
if ($key !== false) {
unset($a[$key]);
}
print_r($a);
/*
Output:
Array
(
[0] => salam
[2] => 1
)
*/
A:
Edit
If you can't take it as given that the object is in that array you need to add a check:
if(in_array($object,$array)) unset($array[array_search($object,$array)]);
Original Answer
if you want to remove a specific object from an array by reference to that object, you can do the following:
unset($array[array_search($object,$array)]);
Example:
<?php
class Foo
{
public $id;
public $name;
}
$foo1 = new Foo();
$foo1->id = 1;
$foo1->name = 'Name1';
$foo2 = new Foo();
$foo2->id = 2;
$foo2->name = 'Name2';
$foo3 = new Foo();
$foo3->id = 3;
$foo3->name = 'Name3';
$array = array($foo1,$foo2,$foo3);
unset($array[array_search($foo2,$array)]);
echo '<pre>';
var_dump($array);
echo '</pre>';
?>
Result:
array(2) {
[0]=>
object(Foo)#1 (2) {
["id"]=>
int(1)
["name"]=>
string(5) "Name1"
}
[2]=>
object(Foo)#3 (2) {
["id"]=>
int(3)
["name"]=>
string(5) "Name3"
}
}
Note that if the object occurs several times, only the first occurrence will be removed!
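If you need to remove every occurrence rather than only the first one, a possible sketch is to collect all matching keys with array_keys(), which accepts a search value, and unset them in a loop:
// Remove every element that matches $foo2, not just the first match
foreach (array_keys($array, $foo2) as $key) {
    unset($array[$key]);
}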
A:
Use the following code:
$arr = array('orange', 'banana', 'apple', 'raspberry');
$result = array_pop($arr);
print_r($result); // Prints 'raspberry', the element that was removed; $arr now holds the remaining three fruits
A:
I came here because I wanted to see if there was a more elegant solution to this problem than using unset($arr[$i]). To my disappointment these answers are either wrong or do not cover every edge case.
Here is why array_diff() does not work. Keys are unique in the array, while elements are not always unique.
$arr = [1,2,2,3];
foreach($arr as $i => $n){
$b = array_diff($arr,[$n]);
echo "\n".json_encode($b);
}
Results...
[2,2,3]
[1,3]
[1,2,2]
If two elements are the same, they will both be removed. This also applies to array_search() and array_flip().
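For example, a small sketch of how array_flip() loses duplicate values:
$arr = [1, 2, 2, 3];
print_r(array_flip($arr));
// Output: Array ( [1] => 0 [2] => 2 [3] => 3 )
// The two elements with value 2 collapse into a single key, so one of them is silently lost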
I saw a lot of answers with array_slice() and array_splice(), but these functions only work with numeric arrays. As far as I am aware, none of the answers here fully answer the question, so here is a solution that will work.
$arr = [1,2,3];
foreach($arr as $i => $n){
$b = array_merge(array_slice($arr,0,$i),array_slice($arr,$i+1));
echo "\n".json_encode($b);
}
Results...
[2,3];
[1,3];
[1,2];
Since unset($arr[$i]) works on both associative and numeric arrays, this still does not answer the question.
The solution here is to compare the keys, using a tool that handles both numeric and associative arrays. I use array_diff_uassoc() for this. This function compares the keys in a callback function.
$arr = [1,2,2,3];
//$arr = ['a'=>'z','b'=>'y','c'=>'x','d'=>'w'];
foreach($arr as $key => $n){
$b = array_diff_uassoc($arr, [$key=>$n], function($a,$b) {
if($a != $b){
return 1;
}
});
echo "\n".json_encode($b);
}
Results.....
[2,2,3];
[1,2,3];
[1,2,2];
['b'=>'y','c'=>'x','d'=>'w'];
['a'=>'z','c'=>'x','d'=>'w'];
['a'=>'z','b'=>'y','d'=>'w'];
['a'=>'z','b'=>'y','c'=>'x'];
A:
Step one
First of all, start with the PHP opening tag
<?php
Step Two
create an array named animals
<?php
$animals= array(
'cat', // [0]
'dog', // [1]
'cow' // [2]
);
Step three
remove the item at index 1, which is 'dog'
unset($animals[1]);
Step four
Print modified array
var_dump($animals);
Step five
Re-index the array elements
$newarray = array_values($animals);
Step six
Print re-indexed array
var_dump($newarray);
Step seven
Close php code
?>
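For reference, here is a minimal sketch that puts the steps above into one runnable script, with the variable names made consistent:
<?php
$animals = array(
    'cat', // [0]
    'dog', // [1]
    'cow'  // [2]
);

unset($animals[1]);                 // Remove 'dog'; the keys are now 0 and 2
var_dump($animals);

$newarray = array_values($animals); // Re-index so the keys run 0, 1 again
var_dump($newarray);
?>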
A:
Yes, there is an easy way to delete an element from an array using PHP. The unset() function can be used to delete an element from an array, and the array_values() function can be used to reindex the array to ensure that the element is no longer included in the array when using the foreach loop.
For example, to delete the element with the key "foo" from an array called $array:
$array = array("foo" => "bar", "baz" => "qux");
// Delete the element with the key "foo"
unset($array["foo"]);
// Reindex the array
$array = array_values($array);
// Use the foreach loop
foreach ($array as $key => $value) {
// Do something with each element
}
This will ensure that the element with the key "foo" is no longer included in the array when using the foreach loop.
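One caveat with this approach: array_values() discards string keys, so after re-indexing the remaining elements are addressed by numeric keys. A small sketch:
$array = array("foo" => "bar", "baz" => "qux");
unset($array["foo"]);
print_r(array_values($array));
// Output: Array ( [0] => qux )
// The "baz" key is gone; skip the array_values() call if you need to keep the string keys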
| Deleting an element from an array in PHP | Is there an easy way to delete an element from an array using PHP, such that foreach ($array) no longer includes that element?
I thought that setting it to null would do it, but apparently it does not work.
| [
"There are different ways to delete an array element, where some are more useful for some specific tasks than others.\nDeleting a single array element\nIf you want to delete just one array element you can use unset() or alternatively \\array_splice().\nIf you know the value and don’t know the key to delete the element you can use \\array_search() to get the key. This only works if the element does not occur more than once, since \\array_search returns the first hit only.\nunset()\nNote that when you use unset() the array keys won’t change. If you want to reindex the keys you can use \\array_values() after unset(), which will convert all keys to numerically enumerated keys starting from 0.\nCode:\n$array = [0 => \"a\", 1 => \"b\", 2 => \"c\"];\nunset($array[1]);\n // ↑ Key which you want to delete\n\nOutput:\n[\n [0] => a\n [2] => c\n]\n\n\\array_splice() method\nIf you use \\array_splice() the keys will automatically be reindexed, but the associative keys won’t change — as opposed to \\array_values(), which will convert all keys to numerical keys.\n\\array_splice() needs the offset, not the key, as the second parameter.\nCode:\n$array = [0 => \"a\", 1 => \"b\", 2 => \"c\"];\n\\array_splice($array, 1, 1);\n // ↑ Offset which you want to delete\n\nOutput:\n[\n [0] => a\n [1] => c\n]\n\narray_splice(), same as unset(), take the array by reference. You don’t assign the return values of those functions back to the array.\nDeleting multiple array elements\nIf you want to delete multiple array elements and don’t want to call unset() or \\array_splice() multiple times you can use the functions \\array_diff() or \\array_diff_key() depending on whether you know the values or the keys of the elements which you want to delete.\n\\array_diff() method\nIf you know the values of the array elements which you want to delete, then you can use \\array_diff(). As before with unset() it won’t change the keys of the array.\nCode:\n$array = [0 => \"a\", 1 => \"b\", 2 => \"c\", 3 => \"c\"];\n$array = \\array_diff($array, [\"a\", \"c\"]);\n // └────────┘\n // Array values which you want to delete\n\nOutput:\n[\n [1] => b\n]\n\n\\array_diff_key() method\nIf you know the keys of the elements which you want to delete, then you want to use \\array_diff_key(). You have to make sure you pass the keys as keys in the second parameter and not as values. Keys won’t reindex.\nCode:\n$array = [0 => \"a\", 1 => \"b\", 2 => \"c\"];\n$array = \\array_diff_key($array, [0 => \"xy\", \"2\" => \"xy\"]);\n // ↑ ↑\n // Array keys which you want to delete\n\nOutput:\n[\n [1] => b\n]\n\nIf you want to use unset() or \\array_splice() to delete multiple elements with the same value you can use \\array_keys() to get all the keys for a specific value and then delete all elements.\n\\array_filter() method\nIf you want to delete all elements with a specific value in the array you can use \\array_filter().\nCode:\n$array = [0 => \"a\", 1 => \"b\", 2 => \"c\"];\n$array = \\array_filter($array, static function ($element) {\n return $element !== \"b\";\n // ↑\n // Array value which you want to delete\n});\n\nOutput:\n[\n [0] => a\n [1] => c\n]\n\n",
"It should be noted that unset() will keep indexes untouched, which is what you'd expect when using string indexes (array as hashtable), but can be quite surprising when dealing with integer indexed arrays:\n$array = array(0, 1, 2, 3);\nunset($array[2]);\nvar_dump($array);\n/* array(3) {\n [0]=>\n int(0)\n [1]=>\n int(1)\n [3]=>\n int(3)\n} */\n\n$array = array(0, 1, 2, 3);\narray_splice($array, 2, 1);\nvar_dump($array);\n/* array(3) {\n [0]=>\n int(0)\n [1]=>\n int(1)\n [2]=>\n int(3)\n} */\n\nSo array_splice() can be used if you'd like to normalize your integer keys. Another option is using array_values() after unset():\n$array = array(0, 1, 2, 3);\n\nunset($array[2]);\n$array = array_values($array);\nvar_dump($array);\n/* array(3) {\n [0]=>\n int(0)\n [1]=>\n int(1)\n [2]=>\n int(3)\n} */\n\n",
" // Our initial array\n $arr = array(\"blue\", \"green\", \"red\", \"yellow\", \"green\", \"orange\", \"yellow\", \"indigo\", \"red\");\n print_r($arr);\n\n // Remove the elements who's values are yellow or red\n $arr = array_diff($arr, array(\"yellow\", \"red\"));\n print_r($arr);\n\nThis is the output from the code above:\nArray\n(\n [0] => blue\n [1] => green\n [2] => red\n [3] => yellow\n [4] => green\n [5] => orange\n [6] => yellow\n [7] => indigo\n [8] => red\n)\n\nArray\n(\n [0] => blue\n [1] => green\n [4] => green\n [5] => orange\n [7] => indigo\n)\n\nNow, array_values() will reindex a numerical array nicely, but it will remove all key strings from the array and replace them with numbers. If you need to preserve the key names (strings), or reindex the array if all keys are numerical, use array_merge():\n$arr = array_merge(array_diff($arr, array(\"yellow\", \"red\")));\nprint_r($arr);\n\nOutputs\nArray\n(\n [0] => blue\n [1] => green\n [2] => green\n [3] => orange\n [4] => indigo\n)\n\n",
"$key = array_search($needle, $array);\nif ($key !== false) {\n unset($array[$key]);\n}\n\n",
"unset($array[$index]);\n\n",
"Also, for a named element:\nunset($array[\"elementName\"]);\n\n",
"If you have a numerically indexed array where all values are unique (or they are non-unique but you wish to remove all instances of a particular value), you can simply use array_diff() to remove a matching element, like this:\n$my_array = array_diff($my_array, array('Value_to_remove'));\n\nFor example:\n$my_array = array('Andy', 'Bertha', 'Charles', 'Diana');\necho sizeof($my_array) . \"\\n\";\n$my_array = array_diff($my_array, array('Charles'));\necho sizeof($my_array);\n\nThis displays the following:\n4\n3\n\nIn this example, the element with the value 'Charles' is removed as can be verified by the sizeof() calls that report a size of 4 for the initial array, and 3 after the removal.\n",
"Destroy a single element of an array\nunset()\n$array1 = array('A', 'B', 'C', 'D', 'E');\nunset($array1[2]); // Delete known index(2) value from array\nvar_dump($array1);\n\nThe output will be:\narray(4) {\n [0]=>\n string(1) \"A\"\n [1]=>\n string(1) \"B\"\n [3]=>\n string(1) \"D\"\n [4]=>\n string(1) \"E\"\n}\n\nIf you need to re index the array:\n$array1 = array_values($array1);\nvar_dump($array1);\n\nThen the output will be:\narray(4) {\n [0]=>\n string(1) \"A\"\n [1]=>\n string(1) \"B\"\n [2]=>\n string(1) \"D\"\n [3]=>\n string(1) \"E\"\n}\n\nPop the element off the end of array - return the value of the removed element\nmixed array_pop(array &$array)\n$stack = array(\"orange\", \"banana\", \"apple\", \"raspberry\");\n$last_fruit = array_pop($stack);\nprint_r($stack);\nprint_r('Last Fruit:'.$last_fruit); // Last element of the array\n\nThe output will be\nArray\n(\n [0] => orange\n [1] => banana\n [2] => apple\n)\nLast Fruit: raspberry\n\nRemove the first element (red) from an array, - return the value of the removed element\nmixed array_shift ( array &$array )\n$color = array(\"a\" => \"red\", \"b\" => \"green\" , \"c\" => \"blue\");\n$first_color = array_shift($color);\nprint_r ($color);\nprint_r ('First Color: '.$first_color);\n\nThe output will be:\nArray\n(\n [b] => green\n [c] => blue\n)\nFirst Color: red\n\n",
"<?php\n $stack = [\"fruit1\", \"fruit2\", \"fruit3\", \"fruit4\"];\n $fruit = array_shift($stack);\n print_r($stack);\n\n echo $fruit;\n?>\n\nOutput: \n[\n [0] => fruit2\n [1] => fruit3\n [2] => fruit4\n]\n\nfruit1\n\n",
"If the index is specified:\n$arr = ['a', 'b', 'c'];\n$index = 0; \nunset($arr[$index]); // $arr = ['b', 'c']\n\nIf we have value instead of index:\n$arr = ['a', 'b', 'c'];\n\n// search the value to find index\n// Notice! this will only find the first occurrence of value\n$index = array_search('a', $arr);\n\nif($index !== false){\n unset($arr[$index]); // $arr = ['b', 'c']\n}\n\nThe if condition is necessary\nbecause if index is not found, unset() will automatically delete\nthe first element of the array which is not what we want.\n",
"If you have to delete multiple values in an array and the entries in that array are objects or structured data, array_filter() is your best bet. Those entries that return a true from the callback function will be retained.\n$array = [\n ['x'=>1,'y'=>2,'z'=>3], \n ['x'=>2,'y'=>4,'z'=>6], \n ['x'=>3,'y'=>6,'z'=>9]\n];\n\n$results = array_filter($array, function($value) {\n return $value['x'] > 2; \n}); //=> [['x'=>3,'y'=>6,z=>'9']]\n\n",
"Associative arrays\nFor associative arrays, use unset:\n$arr = array('a' => 1, 'b' => 2, 'c' => 3);\nunset($arr['b']);\n\n// RESULT: array('a' => 1, 'c' => 3)\n\n\nNumeric arrays\nFor numeric arrays, use array_splice:\n$arr = array(1, 2, 3);\narray_splice($arr, 1, 1);\n\n// RESULT: array(0 => 1, 1 => 3)\n\nNote\nUsing unset for numeric arrays will not produce an error, but it will mess up your indexes:\n$arr = array(1, 2, 3);\nunset($arr[1]);\n\n// RESULT: array(0 => 1, 2 => 3)\n\n",
"If you need to remove multiple elements from an associative array, you can use array_diff_key() (here used with array_flip()):\n$my_array = array(\n \"key1\" => \"value 1\",\n \"key2\" => \"value 2\",\n \"key3\" => \"value 3\",\n \"key4\" => \"value 4\",\n \"key5\" => \"value 5\",\n);\n\n$to_remove = array(\"key2\", \"key4\");\n\n$result = array_diff_key($my_array, array_flip($to_remove));\n\nprint_r($result);\n\nOutput:\nArray ( [key1] => value 1 [key3] => value 3 [key5] => value 5 ) \n\n",
"unset() destroys the specified variables.\nThe behavior of unset() inside of a function can vary depending on what type of variable you are attempting to destroy.\nIf a globalized variable is unset() inside of a function, only the local variable is destroyed. The variable in the calling environment will retain the same value as before unset() was called.\n<?php\n function destroy_foo()\n {\n global $foo;\n unset($foo);\n }\n\n $foo = 'bar';\n destroy_foo();\n echo $foo;\n?>\n\nThe answer of the above code will be bar.\nTo unset() a global variable inside of a function:\n<?php\n function foo()\n {\n unset($GLOBALS['bar']);\n }\n\n $bar = \"something\";\n foo();\n?>\n\n",
"// Remove by value\nfunction removeFromArr($arr, $val)\n{\n unset($arr[array_search($val, $arr)]);\n return array_values($arr);\n}\n\n",
"Solutions:\n\nTo delete one element, use unset():\n\n\nunset($array[3]);\nunset($array['foo']);\n\n\n\nTo delete multiple noncontiguous elements, also use unset():\n\n\nunset($array[3], $array[5]);\nunset($array['foo'], $array['bar']);\n\n\n\nTo delete multiple contiguous elements, use array_splice():\n\n\narray_splice($array, $offset, $length);\n\n\nFurther explanation:\nUsing these functions removes all references to these elements from PHP. If you want to keep a key in the array, but with an empty value, assign the empty string to the element:\n$array[3] = $array['foo'] = '';\n\nBesides syntax, there's a logical difference between using unset() and assigning '' to the element. The first says This doesn't exist anymore, while the second says This still exists, but its value is the empty string.\nIf you're dealing with numbers, assigning 0 may be a better alternative. So, if a company stopped production of the model XL1000 sprocket, it would update its inventory with:\nunset($products['XL1000']);\n\nHowever, if it temporarily ran out of XL1000 sprockets, but was planning to receive a new shipment from the plant later this week, this is better:\n$products['XL1000'] = 0;\n\nIf you unset() an element, PHP adjusts the array so that looping still works correctly. It doesn't compact the array to fill in the missing holes. This is what we mean when we say that all arrays are associative, even when they appear to be numeric. Here's an example:\n// Create a \"numeric\" array\n$animals = array('ant', 'bee', 'cat', 'dog', 'elk', 'fox');\nprint $animals[1]; // Prints 'bee'\nprint $animals[2]; // Prints 'cat'\ncount($animals); // Returns 6\n\n// unset()\nunset($animals[1]); // Removes element $animals[1] = 'bee'\nprint $animals[1]; // Prints '' and throws an E_NOTICE error\nprint $animals[2]; // Still prints 'cat'\ncount($animals); // Returns 5, even though $array[5] is 'fox'\n\n// Add a new element\n$animals[ ] = 'gnu'; // Add a new element (not Unix)\nprint $animals[1]; // Prints '', still empty\nprint $animals[6]; // Prints 'gnu', this is where 'gnu' ended up\ncount($animals); // Returns 6\n\n// Assign ''\n$animals[2] = ''; // Zero out value\nprint $animals[2]; // Prints ''\ncount($animals); // Returns 6, count does not decrease\n\nTo compact the array into a densely filled numeric array, use array_values():\n$animals = array_values($animals);\n\nAlternatively, array_splice() automatically reindexes arrays to avoid leaving holes:\n// Create a \"numeric\" array\n$animals = array('ant', 'bee', 'cat', 'dog', 'elk', 'fox');\narray_splice($animals, 2, 2);\nprint_r($animals);\nArray\n(\n [0] => ant\n [1] => bee\n [2] => elk\n [3] => fox\n)\n\nThis is useful if you're using the array as a queue and want to remove items from the queue while still allowing random access. To safely remove the first or last element from an array, use array_shift() and array_pop(), respectively.\n",
"Follow the default functions:\n\nPHP: unset\n\nunset() destroys the specified variables. For more info, you can refer to PHP unset\n$Array = array(\"test1\", \"test2\", \"test3\", \"test3\");\n\nunset($Array[2]);\n\n\nPHP: array_pop\n\nThe array_pop() function deletes the last element of an array. For more info, you can refer to PHP array_pop\n$Array = array(\"test1\", \"test2\", \"test3\", \"test3\");\n\narray_pop($Array);\n\n\nPHP: array_splice\n\nThe array_splice() function removes selected elements from an array and replaces it with new elements. For more info, you can refer to PHP array_splice\n$Array = array(\"test1\", \"test2\", \"test3\", \"test3\");\n\narray_splice($Array,1,2);\n\n\nPHP: array_shift\n\nThe array_shift() function removes the first element from an array. For more info, you can refer to PHP array_shift\n$Array = array(\"test1\", \"test2\", \"test3\", \"test3\");\n\narray_shift($Array);\n\n",
"I'd just like to say I had a particular object that had variable attributes (it was basically mapping a table and I was changing the columns in the table, so the attributes in the object, reflecting the table would vary as well):\nclass obj {\n protected $fields = array('field1','field2');\n protected $field1 = array();\n protected $field2 = array();\n protected loadfields(){}\n // This will load the $field1 and $field2 with rows of data for the column they describe\n protected function clearFields($num){\n foreach($fields as $field) {\n unset($this->$field[$num]);\n // This did not work the line below worked\n unset($this->{$field}[$num]); // You have to resolve $field first using {}\n }\n }\n}\n\nThe whole purpose of $fields was just, so I don't have to look everywhere in the code when they're changed, I just look at the beginning of the class and change the list of attributes and the $fields array content to reflect the new attributes.\n",
"Suppose you have the following array:\nArray\n(\n [user_id] => 193\n [storage] => 5\n)\n\nTo delete storage, do:\nunset($attributes['storage']);\n$attributes = array_filter($attributes);\n\nAnd you get:\nArray\n(\n [user_id] => 193\n)\n\n",
"Two ways for removing the first item of an array with keeping order of the index and also if you don't know the key name of the first item.\nSolution #1\n// 1 is the index of the first object to get\n// NULL to get everything until the end\n// true to preserve keys\n$array = array_slice($array, 1, null, true);\n\nSolution #2\n// Rewinds the array's internal pointer to the first element\n// and returns the value of the first array element.\n$value = reset($array);\n// Returns the index element of the current array position\n$key = key($array);\nunset($array[$key]);\n\n\nFor this sample data:\n$array = array(10 => \"a\", 20 => \"b\", 30 => \"c\");\n\nYou must have this result:\narray(2) {\n [20]=>\n string(1) \"b\"\n [30]=>\n string(1) \"c\"\n}\n\n",
"unset() multiple, fragmented elements from an array\nWhile unset() has been mentioned here several times, it has yet to be mentioned that unset() accepts multiple variables making it easy to delete multiple, noncontiguous elements from an array in one operation:\n// Delete multiple, noncontiguous elements from an array\n$array = [ 'foo', 'bar', 'baz', 'quz' ];\nunset( $array[2], $array[3] );\nprint_r($array);\n// Output: [ 'foo', 'bar' ]\n\nunset() dynamically\nunset() does not accept an array of keys to remove, so the code below will fail (it would have made it slightly easier to use unset() dynamically though).\n$array = range(0,5);\n$remove = [1,2];\n$array = unset( $remove ); // FAILS: \"unexpected 'unset'\"\nprint_r($array);\n\nInstead, unset() can be used dynamically in a foreach loop:\n$array = range(0,5);\n$remove = [1,2];\nforeach ($remove as $k=>$v) {\n unset($array[$v]);\n}\nprint_r($array);\n// Output: [ 0, 3, 4, 5 ]\n\nRemove array keys by copying the array\nThere is also another practice that has yet to be mentioned.\nSometimes, the simplest way to get rid of certain array keys is to simply copy $array1 into $array2.\n$array1 = range(1,10);\nforeach ($array1 as $v) {\n // Remove all even integers from the array\n if( $v % 2 ) {\n $array2[] = $v;\n }\n}\nprint_r($array2);\n// Output: [ 1, 3, 5, 7, 9 ];\n\nObviously, the same practice applies to text strings:\n$array1 = [ 'foo', '_bar', 'baz' ];\nforeach ($array1 as $v) {\n // Remove all strings beginning with underscore\n if( strpos($v,'_')===false ) {\n $array2[] = $v;\n }\n}\nprint_r($array2);\n// Output: [ 'foo', 'baz' ]\n\n",
"<?php\n // If you want to remove a particular array element use this method\n $my_array = array(\"key1\"=>\"value 1\", \"key2\"=>\"value 2\", \"key3\"=>\"value 3\");\n\n print_r($my_array);\n if (array_key_exists(\"key1\", $my_array)) {\n unset($my_array['key1']);\n print_r($my_array);\n }\n else {\n echo \"Key does not exist\";\n }\n?>\n\n<?php\n //To remove first array element\n $my_array = array(\"key1\"=>\"value 1\", \"key2\"=>\"value 2\", \"key3\"=>\"value 3\");\n print_r($my_array);\n $new_array = array_slice($my_array, 1);\n print_r($new_array);\n?>\n\n\n<?php\n echo \"<br/> \";\n // To remove first array element to length\n // starts from first and remove two element\n $my_array = array(\"key1\"=>\"value 1\", \"key2\"=>\"value 2\", \"key3\"=>\"value 3\");\n print_r($my_array);\n $new_array = array_slice($my_array, 1, 2);\n print_r($new_array);\n?>\n\nOutput\n Array ( [key1] => value 1 [key2] => value 2 [key3] =>\n value 3 ) Array ( [key2] => value 2 [key3] => value 3 )\n Array ( [key1] => value 1 [key2] => value 2 [key3] => value 3 )\n Array ( [key2] => value 2 [key3] => value 3 )\n Array ( [key1] => value 1 [key2] => value 2 [key3] => value 3 )\n Array ( [key2] => value 2 [key3] => value 3 )\n\n",
"Remove an array element based on a key:\nUse the unset function like below:\n$a = array(\n 'salam',\n '10',\n 1\n);\n\nunset($a[1]);\n\nprint_r($a);\n\n/*\n\n Output:\n\n Array\n (\n [0] => salam\n [2] => 1\n )\n\n*/\n\nRemove an array element based on value:\nUse the array_search function to get an element key and use the above manner to remove an array element like below:\n$a = array(\n 'salam',\n '10',\n 1\n);\n\n$key = array_search(10, $a);\n\nif ($key !== false) {\n unset($a[$key]);\n}\n\nprint_r($a);\n\n/*\n\n Output:\n\n Array\n (\n [0] => salam\n [2] => 1\n )\n\n*/\n\n",
"Edit\nIf you can't take it as given that the object is in that array you need to add a check:\nif(in_array($object,$array)) unset($array[array_search($object,$array)]);\n\nOriginal Answer\nif you want to remove a specific object of an array by reference of that object you can do following:\nunset($array[array_search($object,$array)]);\n\nExample:\n<?php\nclass Foo\n{\n public $id;\n public $name;\n}\n\n$foo1 = new Foo();\n$foo1->id = 1;\n$foo1->name = 'Name1';\n\n$foo2 = new Foo();\n$foo2->id = 2;\n$foo2->name = 'Name2';\n\n$foo3 = new Foo();\n$foo3->id = 3;\n$foo3->name = 'Name3';\n\n\n$array = array($foo1,$foo2,$foo3);\nunset($array[array_search($foo2,$array)]);\n\necho '<pre>';\nvar_dump($array);\necho '</pre>';\n?>\n\nResult:\narray(2) {\n[0]=>\n object(Foo)#1 (2) {\n [\"id\"]=>\n int(1)\n [\"name\"]=>\n string(5) \"Name1\"\n }\n[2]=>\n object(Foo)#3 (2) {\n [\"id\"]=>\n int(3)\n [\"name\"]=>\n string(5) \"Name3\"\n }\n}\n\nNote that if the object occures several times it will only be removed the first occurence!\n",
"Use the following code:\n$arr = array('orange', 'banana', 'apple', 'raspberry');\n$result = array_pop($arr);\nprint_r($result);\n\n",
"I came here because I wanted to see if there was a more elegant solution to this problem than using unset($arr[$i]). To my disappointment these answers are either wrong or do not cover every edge case.\nHere is why array_diff() does not work. Keys are unique in the array, while elements are not always unique.\n$arr = [1,2,2,3];\n\nforeach($arr as $i => $n){\n $b = array_diff($arr,[$n]);\n echo \"\\n\".json_encode($b);\n}\n\nResults...\n[2,2,3]\n[1,3]\n[1,2,2] \n\nIf two elements are the same they will be remove. This also applies for array_search() and array_flip().\nI saw a lot of answers with array_slice() and array_splice(), but these functions only work with numeric arrays. All the answers I am aware if here does not answer the question, and so here is a solution that will work.\n$arr = [1,2,3];\n\nforeach($arr as $i => $n){\n $b = array_merge(array_slice($arr,0,$i),array_slice($arr,$i+1));\n echo \"\\n\".json_encode($b);\n}\n\nResults...\n\n[2,3];\n[1,3];\n[1,2];\n\nSince unset($arr[$i]) will work on both associative array and numeric arrays this still does not answer the question.\nThis solution is to compare the keys and with a tool that will handle both numeric and associative arrays. I use array_diff_uassoc() for this. This function compares the keys in a call back function.\n$arr = [1,2,2,3];\n//$arr = ['a'=>'z','b'=>'y','c'=>'x','d'=>'w'];\nforeach($arr as $key => $n){\n $b = array_diff_uassoc($arr, [$key=>$n], function($a,$b) {\n if($a != $b){\n return 1;\n }\n });\n echo \"\\n\".json_encode($b);\n} \n\nResults.....\n[2,2,3];\n[1,2,3];\n[1,2,2];\n\n['b'=>'y','c'=>'x','d'=>'w'];\n['a'=>'z','c'=>'x','d'=>'w'];\n['a'=>'z','b'=>'y','d'=>'w'];\n['a'=>'z','b'=>'y','c'=>'x'];\n\n",
"Step one\nfirt of all start with php syntex\n<?php\n\nStep Two\ncreate an array named animals\n<?php \n$animals= array(\n \n 'cat', // [0]\n 'dog', // [1]\n 'cow' // [2]\n \n);\n\nStep three\nremove item at index 1 which is 'for'\nunset($animals1[1]); \n\nStep four\nPrint modified array\nvar_dump($danimals1);\n\nStep five\nRe-index the array elements\n$newarray = array_values($animals1);\n \n\nStep six\nPrint re-indexed array\nvar_dump($newarray);\n\nStep seven\nClose php code\n?>\n\n",
"Yes, there is an easy way to delete an element from an array using PHP. The unset() function can be used to delete an element from an array, and the array_values() function can be used to reindex the array to ensure that the element is no longer included in the array when using the foreach loop.\nFor example, to delete the element with the key \"foo\" from an array called $array:\n$array = array(\"foo\" => \"bar\", \"baz\" => \"qux\");\n\n// Delete the element with the key \"foo\"\nunset($array[\"foo\"]);\n\n// Reindex the array\n$array = array_values($array);\n\n// Use the foreach loop\nforeach ($array as $key => $value) {\n// Do something with each element\n}\n\nThis will ensure that the element with the key \"foo\" is no longer included in the array when using the foreach loop.\n"
] | [
3450,
1410,
413,
232,
101,
71,
71,
38,
37,
29,
27,
22,
22,
21,
19,
13,
11,
10,
9,
9,
8,
7,
7,
7,
6,
1,
1,
0
] | [] | [] | [
"arrays",
"php",
"unset"
] | stackoverflow_0000369602_arrays_php_unset.txt |
Q:
I am conducting transfer learning of StyleGan3 to another custom dataset
I am trying to do transfer learning to another custom dataset using StyleGAN3. I reached the error mentioned below due to the size of the images. How can I get past this problem and produce the appropriate dataset image size of 1024*1024?
File "/content/stylegan3/training/dataset.py", line 128, in resolution
assert self.image_shape[1] == self.image_shape[2]
AssertionError
A:
You can fix this error by ensuring that the images in your dataset are square (the assertion checks that width equals height), for example 1024x1024 pixels.
To do this, you can first resize the images in your dataset using an image editing tool or a script. Once the images have been resized to 1024x1024 pixels, you can then use StyleGAN3 to transfer learning to the custom dataset.
Here's an example of how you can resize the images in your dataset using the Python Imaging Library (PIL):
# Import the required libraries
from PIL import Image
import os
# Set the directory containing the original images
original_dir = "./original"
# Set the directory to save the resized images
resized_dir = "./resized"
# Iterate over the images in the original directory
for filename in os.listdir(original_dir):
# Open the original image
original_image = Image.open(os.path.join(original_dir, filename))
# Resize the image to 1024x1024 pixels
resized_image = original_image.resize((1024, 1024))
# Save the resized image
resized_image.save(os.path.join(resized_dir, filename))
After resizing the images in your dataset, you can then use StyleGAN3 to transfer learning to the dataset.
| I am conducting transfer learning of StyleGan3 to another custom dataset | Trying to transfer learning to another custom datset using stylegan3. I reached this mentioned error due to the size of images. How can i go through this problem and furnish the appropriate size of dataset image 1024*1024.
File "/content/stylegan3/training/dataset.py", line 128, in resolution
assert self.image_shape[1] == self.image_shape[2]
AssertionError
| [
"you can fix this error by ensuring that the images in your dataset have a resolution of 1024x1024 pixels.\nTo do this, you can first resize the images in your dataset using an image editing tool or a script. Once the images have been resized to 1024x1024 pixels, you can then use StyleGAN3 to transfer learning to the custom dataset.\nHere's an example of how you can resize the images in your dataset using the Python Imaging Library (PIL):\n# Import the required libraries\nfrom PIL import Image\nimport os\n\n# Set the directory containing the original images\noriginal_dir = \"./original\"\n\n# Set the directory to save the resized images\nresized_dir = \"./resized\"\n\n# Iterate over the images in the original directory\nfor filename in os.listdir(original_dir):\n # Open the original image\n original_image = Image.open(os.path.join(original_dir, filename))\n\n # Resize the image to 1024x1024 pixels\n resized_image = original_image.resize((1024, 1024))\n\n # Save the resized image\n resized_image.save(os.path.join(resized_dir, filename))\n\nAfter resizing the images in your dataset, you can then use StyleGAN3 to transfer learning to the dataset.\n"
] | [
0
] | [] | [] | [
"error_handling",
"image",
"resize",
"stylegan",
"transfer_learning"
] | stackoverflow_0074675351_error_handling_image_resize_stylegan_transfer_learning.txt |
Q:
Where does this Python script go in the __init__py file?
So I am trying to create my first Azure function. I am currently following a tutorial online. I managed to create an environment, install the necessary packages via Visual Studio Code. I actually have my Python script which looks like this:
import sqlalchemy
import pandas as pd
sqlcon = sqlalchemy.create_engine('mssql://LBCUCRDBS100TL/AdventureWorks?driver=ODBC+Driver+17+for+SQL+Server')
path=r'C:\Users\H\Desktop\Correspondence\Correspondence_Received_with_Types.xlsx'
df=pd.read_excel(path)
df.to_sql('Correspondence',sqlcon,if_exists='append', index=False)
SQLQuery1="UPDATE Correspondence SET [Response By] = REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE (REPLACE(REPLACE([Response By], 'Junaid Dar', 'Robson de Souza') , 'Jade Buckingham','Rivaldo Vitor Borba Ferreira'), 'Danielle Wallace','Ronaldo de Assis Moreira'), 'Sadiya Nurmahomed','Wayne Mark Rooney'),'Tendai Giwa','Joseph John Cole'), 'Tiffany Heslop', 'Lionel Andrés Messi'), 'Gareth Budiar','Steven George Gerrard'),'Jeneen Nicholson','Samuel Eto''o Fils'), 'James Beaton','Carlos Alberto Tevez'), 'Kirsty Moore','Vincent Jean Mpoy Kompany'),'Kelly McDonald','Leroy Aziz Sané'), 'Muzaffer Mehmet','Gareth Frank Bale'), 'Charmaine Banerji','Ronaldo Luís Nazário de Lima'),'Paul Hunt','Luís Carlos Almeida de Cunha'), 'Jessica Ararat-David', 'Dimitar Ivanov Berbatov'), 'Anita Hayler','José María Gutiérrez Hernández'), 'Vishal Chandel','Diego Armando Maradona'), 'Saleema Panjwani','Edson Arantes do Nascimento'), 'Mohammed Uddin','Thierry Daniel Henry'),'Edward Ford','Nicolas Sébastien Anelka'), 'Gillian Sutherland', 'Zinedine Yazid Zidane'), 'Abdul Jimoh','Gabriel Omar Batistuta'),'Lee Woolward','Emmanuel Laurent Petit'), 'Antonia Akintaju','Robert Emmanuel Pires'),'Maria Dayang','Roberto Carlos da Silva Rocha'), 'Latisha McFarlane','David Robert Joseph Beckham'), 'Janine Townsend', 'Francesco Totti'), 'Vaughan Anderson-Moore','Alessandro Del Piero')"
SQLQuery2="UPDATE Correspondence SET [CPZ Name] = REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE (REPLACE(REPLACE([CPZ Name], 'North Camberwell', 'Dorne') , 'South Rotherhithe','King’s Landing'), 'Thorburn Square','Dragonstone'), 'All Non CPZ highway south of South Circular Road (A205)','Braavos'),'Housing Estates', 'Crownlands'), 'Parks Car Park', 'Winterfell'), 'South East Walworth', 'The Iron Islands'),'R - North Peckham','Casterly Rock'), 'CCTV Camera','Lannisport'),'Herne Hill', 'Volantis'),'South Camberwell', 'The Westerlands'),'South Bermondsey','Riverrun'), 'Rotherhithe', 'Highgarden'), ' Bankside','Storm’s End'), 'Trafalgar','The Kingsroad'), 'North Dulwich and Denmark Hill', 'Ashemark'), 'All Non CPZ highway north of South Circular Road (A205)','Blackmont'),'North Dulwich and Denmark Hill', 'Asshai'),'Newington', 'Doune Castle'), 'Peckham Road South', 'Horn Hill'),'South East Bermondsey', 'Banefort'), 'East dulwich','Red Keep'), ' East Camberwell', 'South Park'), 'Car Parks', 'Springfield'), 'Borough', 'Hogwarts'), 'Lucas Gardens', 'Neverland'), 'Peckham West', 'Gotham City'), ' East Camberwell', 'Wonderland'), 'Dog Kennel Hill', 'Stars Hollow'), 'West Walworth', 'Jurassic Park'), 'Bermondsey', 'Atlantis'), 'South Walworth', 'Asgard'), 'The Grange', 'The Shire'), 'London Bridge', 'Emerald City'), 'Walworth','Hogsmeade'), 'Peckham','Mordor'), 'Camberwell','Lilliput')"
SQLQuery3="UPDATE Correspondence SET [CPZ Code] = CASE WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'A' THEN 'A1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'B' THEN 'A2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'C1' THEN 'C1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'C2' THEN 'C2' WHEN LEN([CPZ Code]) = 3 AND [CPZ Code] = 'CAM' THEN 'D1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'CP' THEN 'D2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'D' THEN 'E1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'E' THEN 'E2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'EC' THEN 'F1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'ED' THEN 'F2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'F' THEN 'G1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'G' THEN 'G2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'GR' THEN 'H1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'H' THEN 'H2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'HE' THEN 'I1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'HH' THEN 'I2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'J' THEN 'J1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'K' THEN 'J2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'L' THEN 'K1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'LG' THEN 'K2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'M1' THEN 'L1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'M2' THEN 'L2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'N' THEN 'M1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'NC' THEN 'M2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'P' THEN 'N1' WHEN LEN([CPZ Code]) = 3 AND [CPZ Code] = 'PCP' THEN 'N2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'PR' THEN 'O1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'PW' THEN 'O2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'Q' THEN 'P1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'R' THEN 'P2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'SB' THEN 'Q1' WHEN LEN([CPZ Code]) = 3 AND [CPZ Code] = 'SEB' THEN 'Q2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'T' THEN 'R1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'TS' THEN 'R2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'Z' THEN 'S1' ELSE [CPZ Code] END"
SQLQuery4="UPDATE Correspondence SET [Ticket #]=CONCAT('A3',STUFF([Ticket #],1,2,'')) "
sqlcon.execute(SQLQuery1)
sqlcon.execute(SQLQuery2)
sqlcon.execute(SQLQuery3)
sqlcon.execute(SQLQuery4)
The default __init__.py file looks like this:
import logging
import azure.functions as func
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python HTTP trigger function processed a request')
name = req.params.get('name')
if not name:
try:
req_body = req.get_json()
except ValueError:
pass
else:
name = req_body.get('name')
if name:
return func.HttpResponse(f"Hello {name}!")
else:
return func.HttpResponse(
"Please pass a name in the query string or in the request body",
status_code=400
)
I pasted my code in the azure function like so:
import logging
import azure.functions as func
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python HTTP trigger function processed a request')
import sqlalchemy
import pandas as pd
sqlcon = sqlalchemy.create_engine('mssql://LBCUCRDBS100TL/AdventureWorks?driver=ODBC+Driver+17+for+SQL+Server')
path=r'C:\Users\H\Desktop\Correspondence\Correspondence_Received_with_Types.xlsx'
df=pd.read_excel(path)
df.to_sql('Correspondence',sqlcon,if_exists='append', index=False)
SQLQuery1="UPDATE Correspondence SET [Response By] = REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE (REPLACE(REPLACE([Response By], 'Junaid Dar', 'Robson de Souza') , 'Jade Buckingham','Rivaldo Vitor Borba Ferreira'), 'Danielle Wallace','Ronaldo de Assis Moreira'), 'Sadiya Nurmahomed','Wayne Mark Rooney'),'Tendai Giwa','Joseph John Cole'), 'Tiffany Heslop', 'Lionel Andrés Messi'), 'Gareth Budiar','Steven George Gerrard'),'Jeneen Nicholson','Samuel Eto''o Fils'), 'James Beaton','Carlos Alberto Tevez'), 'Kirsty Moore','Vincent Jean Mpoy Kompany'),'Kelly McDonald','Leroy Aziz Sané'), 'Muzaffer Mehmet','Gareth Frank Bale'), 'Charmaine Banerji','Ronaldo Luís Nazário de Lima'),'Paul Hunt','Luís Carlos Almeida de Cunha'), 'Jessica Ararat-David', 'Dimitar Ivanov Berbatov'), 'Anita Hayler','José María Gutiérrez Hernández'), 'Vishal Chandel','Diego Armando Maradona'), 'Saleema Panjwani','Edson Arantes do Nascimento'), 'Mohammed Uddin','Thierry Daniel Henry'),'Edward Ford','Nicolas Sébastien Anelka'), 'Gillian Sutherland', 'Zinedine Yazid Zidane'), 'Abdul Jimoh','Gabriel Omar Batistuta'),'Lee Woolward','Emmanuel Laurent Petit'), 'Antonia Akintaju','Robert Emmanuel Pires'),'Maria Dayang','Roberto Carlos da Silva Rocha'), 'Latisha McFarlane','David Robert Joseph Beckham'), 'Janine Townsend', 'Francesco Totti'), 'Vaughan Anderson-Moore','Alessandro Del Piero')"
SQLQuery2="UPDATE Correspondence SET [CPZ Name] = REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE (REPLACE(REPLACE([CPZ Name], 'North Camberwell', 'Dorne') , 'South Rotherhithe','King’s Landing'), 'Thorburn Square','Dragonstone'), 'All Non CPZ highway south of South Circular Road (A205)','Braavos'),'Housing Estates', 'Crownlands'), 'Parks Car Park', 'Winterfell'), 'South East Walworth', 'The Iron Islands'),'R - North Peckham','Casterly Rock'), 'CCTV Camera','Lannisport'),'Herne Hill', 'Volantis'),'South Camberwell', 'The Westerlands'),'South Bermondsey','Riverrun'), 'Rotherhithe', 'Highgarden'), ' Bankside','Storm’s End'), 'Trafalgar','The Kingsroad'), 'North Dulwich and Denmark Hill', 'Ashemark'), 'All Non CPZ highway north of South Circular Road (A205)','Blackmont'),'North Dulwich and Denmark Hill', 'Asshai'),'Newington', 'Doune Castle'), 'Peckham Road South', 'Horn Hill'),'South East Bermondsey', 'Banefort'), 'East dulwich','Red Keep'), ' East Camberwell', 'South Park'), 'Car Parks', 'Springfield'), 'Borough', 'Hogwarts'), 'Lucas Gardens', 'Neverland'), 'Peckham West', 'Gotham City'), ' East Camberwell', 'Wonderland'), 'Dog Kennel Hill', 'Stars Hollow'), 'West Walworth', 'Jurassic Park'), 'Bermondsey', 'Atlantis'), 'South Walworth', 'Asgard'), 'The Grange', 'The Shire'), 'London Bridge', 'Emerald City'), 'Walworth','Hogsmeade'), 'Peckham','Mordor'), 'Camberwell','Lilliput')"
SQLQuery3="UPDATE Correspondence SET [CPZ Code] = CASE WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'A' THEN 'A1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'B' THEN 'A2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'C1' THEN 'C1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'C2' THEN 'C2' WHEN LEN([CPZ Code]) = 3 AND [CPZ Code] = 'CAM' THEN 'D1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'CP' THEN 'D2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'D' THEN 'E1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'E' THEN 'E2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'EC' THEN 'F1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'ED' THEN 'F2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'F' THEN 'G1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'G' THEN 'G2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'GR' THEN 'H1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'H' THEN 'H2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'HE' THEN 'I1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'HH' THEN 'I2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'J' THEN 'J1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'K' THEN 'J2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'L' THEN 'K1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'LG' THEN 'K2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'M1' THEN 'L1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'M2' THEN 'L2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'N' THEN 'M1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'NC' THEN 'M2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'P' THEN 'N1' WHEN LEN([CPZ Code]) = 3 AND [CPZ Code] = 'PCP' THEN 'N2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'PR' THEN 'O1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'PW' THEN 'O2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'Q' THEN 'P1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'R' THEN 'P2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'SB' THEN 'Q1' WHEN LEN([CPZ Code]) = 3 AND [CPZ Code] = 'SEB' THEN 'Q2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'T' THEN 'R1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'TS' THEN 'R2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'Z' THEN 'S1' ELSE [CPZ Code] END"
SQLQuery4="UPDATE Correspondence SET [Ticket #]=CONCAT('A3',STUFF([Ticket #],1,2,'')) "
sqlcon.execute(SQLQuery1)
sqlcon.execute(SQLQuery2)
sqlcon.execute(SQLQuery3)
sqlcon.execute(SQLQuery4)
name = req.params.get('name')
if not name:
try:
req_body = req.get_json()
except ValueError:
pass
else:
name = req_body.get('name')
if name:
return func.HttpResponse(f"Hello {name}!")
else:
return func.HttpResponse(
"Please pass a name in the query string or in the request body",
status_code=400
)
When editing the code in Visual Studio Code I actually do not get an error, but when I call the Azure function nothing happens. I was expecting it to access the SQL database and update the data.
A:
Based on your code, this might be because of how the code is structured. Make sure you place all the import statements at the top and then run the Azure Function. Also make sure you are actually sending the HTTP request by hitting the function and passing the name when calling it, as your code expects. Below is how it should look:
import logging
import sqlalchemy
import pandas as pd
import azure.functions as func
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python HTTP trigger function processed a request')
name = req.params.get('name')
sqlcon = sqlalchemy.create_engine('mssql://LBCUCRDBS100TL/AdventureWorks?driver=ODBC+Driver+17+for+SQL+Server')
path=r'C:\Users\H\Desktop\Correspondence\Correspondence_Received_with_Types.xlsx'
df=pd.read_excel(path)
df.to_sql('Correspondence',sqlcon,if_exists='append', index=False)
SQLQuery1="UPDATE Correspondence SET [Response By] = REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE (REPLACE(REPLACE([Response By], 'Junaid Dar', 'Robson de Souza') , 'Jade Buckingham','Rivaldo Vitor Borba Ferreira'), 'Danielle Wallace','Ronaldo de Assis Moreira'), 'Sadiya Nurmahomed','Wayne Mark Rooney'),'Tendai Giwa','Joseph John Cole'), 'Tiffany Heslop', 'Lionel Andrés Messi'), 'Gareth Budiar','Steven George Gerrard'),'Jeneen Nicholson','Samuel Eto''o Fils'), 'James Beaton','Carlos Alberto Tevez'), 'Kirsty Moore','Vincent Jean Mpoy Kompany'),'Kelly McDonald','Leroy Aziz Sané'), 'Muzaffer Mehmet','Gareth Frank Bale'), 'Charmaine Banerji','Ronaldo Luís Nazário de Lima'),'Paul Hunt','Luís Carlos Almeida de Cunha'), 'Jessica Ararat-David', 'Dimitar Ivanov Berbatov'), 'Anita Hayler','José María Gutiérrez Hernández'), 'Vishal Chandel','Diego Armando Maradona'), 'Saleema Panjwani','Edson Arantes do Nascimento'), 'Mohammed Uddin','Thierry Daniel Henry'),'Edward Ford','Nicolas Sébastien Anelka'), 'Gillian Sutherland', 'Zinedine Yazid Zidane'), 'Abdul Jimoh','Gabriel Omar Batistuta'),'Lee Woolward','Emmanuel Laurent Petit'), 'Antonia Akintaju','Robert Emmanuel Pires'),'Maria Dayang','Roberto Carlos da Silva Rocha'), 'Latisha McFarlane','David Robert Joseph Beckham'), 'Janine Townsend', 'Francesco Totti'), 'Vaughan Anderson-Moore','Alessandro Del Piero')"
SQLQuery2="UPDATE Correspondence SET [CPZ Name] = REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE (REPLACE(REPLACE([CPZ Name], 'North Camberwell', 'Dorne') , 'South Rotherhithe','King’s Landing'), 'Thorburn Square','Dragonstone'), 'All Non CPZ highway south of South Circular Road (A205)','Braavos'),'Housing Estates', 'Crownlands'), 'Parks Car Park', 'Winterfell'), 'South East Walworth', 'The Iron Islands'),'R - North Peckham','Casterly Rock'), 'CCTV Camera','Lannisport'),'Herne Hill', 'Volantis'),'South Camberwell', 'The Westerlands'),'South Bermondsey','Riverrun'), 'Rotherhithe', 'Highgarden'), ' Bankside','Storm’s End'), 'Trafalgar','The Kingsroad'), 'North Dulwich and Denmark Hill', 'Ashemark'), 'All Non CPZ highway north of South Circular Road (A205)','Blackmont'),'North Dulwich and Denmark Hill', 'Asshai'),'Newington', 'Doune Castle'), 'Peckham Road South', 'Horn Hill'),'South East Bermondsey', 'Banefort'), 'East dulwich','Red Keep'), ' East Camberwell', 'South Park'), 'Car Parks', 'Springfield'), 'Borough', 'Hogwarts'), 'Lucas Gardens', 'Neverland'), 'Peckham West', 'Gotham City'), ' East Camberwell', 'Wonderland'), 'Dog Kennel Hill', 'Stars Hollow'), 'West Walworth', 'Jurassic Park'), 'Bermondsey', 'Atlantis'), 'South Walworth', 'Asgard'), 'The Grange', 'The Shire'), 'London Bridge', 'Emerald City'), 'Walworth','Hogsmeade'), 'Peckham','Mordor'), 'Camberwell','Lilliput')"
SQLQuery3="UPDATE Correspondence SET [CPZ Code] = CASE WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'A' THEN 'A1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'B' THEN 'A2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'C1' THEN 'C1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'C2' THEN 'C2' WHEN LEN([CPZ Code]) = 3 AND [CPZ Code] = 'CAM' THEN 'D1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'CP' THEN 'D2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'D' THEN 'E1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'E' THEN 'E2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'EC' THEN 'F1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'ED' THEN 'F2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'F' THEN 'G1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'G' THEN 'G2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'GR' THEN 'H1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'H' THEN 'H2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'HE' THEN 'I1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'HH' THEN 'I2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'J' THEN 'J1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'K' THEN 'J2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'L' THEN 'K1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'LG' THEN 'K2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'M1' THEN 'L1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'M2' THEN 'L2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'N' THEN 'M1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'NC' THEN 'M2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'P' THEN 'N1' WHEN LEN([CPZ Code]) = 3 AND [CPZ Code] = 'PCP' THEN 'N2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'PR' THEN 'O1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'PW' THEN 'O2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'Q' THEN 'P1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'R' THEN 'P2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'SB' THEN 'Q1' WHEN LEN([CPZ Code]) = 3 AND [CPZ Code] = 'SEB' THEN 'Q2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'T' THEN 'R1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'TS' THEN 'R2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'Z' THEN 'S1' ELSE [CPZ Code] END"
SQLQuery4="UPDATE Correspondence SET [Ticket #]=CONCAT('A3',STUFF([Ticket #],1,2,'')) "
sqlcon.execute(SQLQuery1)
sqlcon.execute(SQLQuery2)
sqlcon.execute(SQLQuery3)
sqlcon.execute(SQLQuery4)
if not name:
try:
req_body = req.get_json()
except ValueError:
pass
else:
name = req_body.get('name')
if name:
return func.HttpResponse(f"Hello {name}!")
else:
return func.HttpResponse(
"Please pass a name in the query string or in the request body",
status_code=400
)
To run the Azure function and make a call to your HTTP function:
You can use the URL below to call your function.
http://localhost:7071/api/HttpTrigger1?name=someName
If you are calling it through Power Automate, make sure you pass a name to the function so it gets called.
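If the UPDATE statements still do not seem to take effect, it is worth checking how they are executed. Below is a minimal sketch, assuming the same connection string and the SQLQuery1..SQLQuery4 strings defined above; it runs the statements inside an explicit transaction via engine.begin(), which also works on SQLAlchemy 2.x, where Engine.execute() was removed:
import sqlalchemy
from sqlalchemy import text

engine = sqlalchemy.create_engine(
    "mssql://LBCUCRDBS100TL/AdventureWorks?driver=ODBC+Driver+17+for+SQL+Server"
)

def run_updates(queries):
    # engine.begin() opens a connection, commits on success and rolls back on error.
    with engine.begin() as conn:
        for query in queries:
            conn.execute(text(query))

# run_updates([SQLQuery1, SQLQuery2, SQLQuery3, SQLQuery4])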
| Where does this Python script go in the __init__.py file? | So I am trying to create my first Azure function. I am currently following a tutorial online. I managed to create an environment and install the necessary packages via Visual Studio Code. I have my Python script, which looks like this:
import sqlalchemy
import pandas as pd
sqlcon = sqlalchemy.create_engine('mssql://LBCUCRDBS100TL/AdventureWorks?driver=ODBC+Driver+17+for+SQL+Server')
path=r'C:\Users\H\Desktop\Correspondence\Correspondence_Received_with_Types.xlsx'
df=pd.read_excel(path)
df.to_sql('Correspondence',sqlcon,if_exists='append', index=False)
SQLQuery1="UPDATE Correspondence SET [Response By] = REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE (REPLACE(REPLACE([Response By], 'Junaid Dar', 'Robson de Souza') , 'Jade Buckingham','Rivaldo Vitor Borba Ferreira'), 'Danielle Wallace','Ronaldo de Assis Moreira'), 'Sadiya Nurmahomed','Wayne Mark Rooney'),'Tendai Giwa','Joseph John Cole'), 'Tiffany Heslop', 'Lionel Andrés Messi'), 'Gareth Budiar','Steven George Gerrard'),'Jeneen Nicholson','Samuel Eto''o Fils'), 'James Beaton','Carlos Alberto Tevez'), 'Kirsty Moore','Vincent Jean Mpoy Kompany'),'Kelly McDonald','Leroy Aziz Sané'), 'Muzaffer Mehmet','Gareth Frank Bale'), 'Charmaine Banerji','Ronaldo Luís Nazário de Lima'),'Paul Hunt','Luís Carlos Almeida de Cunha'), 'Jessica Ararat-David', 'Dimitar Ivanov Berbatov'), 'Anita Hayler','José María Gutiérrez Hernández'), 'Vishal Chandel','Diego Armando Maradona'), 'Saleema Panjwani','Edson Arantes do Nascimento'), 'Mohammed Uddin','Thierry Daniel Henry'),'Edward Ford','Nicolas Sébastien Anelka'), 'Gillian Sutherland', 'Zinedine Yazid Zidane'), 'Abdul Jimoh','Gabriel Omar Batistuta'),'Lee Woolward','Emmanuel Laurent Petit'), 'Antonia Akintaju','Robert Emmanuel Pires'),'Maria Dayang','Roberto Carlos da Silva Rocha'), 'Latisha McFarlane','David Robert Joseph Beckham'), 'Janine Townsend', 'Francesco Totti'), 'Vaughan Anderson-Moore','Alessandro Del Piero')"
SQLQuery2="UPDATE Correspondence SET [CPZ Name] = REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE (REPLACE(REPLACE([CPZ Name], 'North Camberwell', 'Dorne') , 'South Rotherhithe','King’s Landing'), 'Thorburn Square','Dragonstone'), 'All Non CPZ highway south of South Circular Road (A205)','Braavos'),'Housing Estates', 'Crownlands'), 'Parks Car Park', 'Winterfell'), 'South East Walworth', 'The Iron Islands'),'R - North Peckham','Casterly Rock'), 'CCTV Camera','Lannisport'),'Herne Hill', 'Volantis'),'South Camberwell', 'The Westerlands'),'South Bermondsey','Riverrun'), 'Rotherhithe', 'Highgarden'), ' Bankside','Storm’s End'), 'Trafalgar','The Kingsroad'), 'North Dulwich and Denmark Hill', 'Ashemark'), 'All Non CPZ highway north of South Circular Road (A205)','Blackmont'),'North Dulwich and Denmark Hill', 'Asshai'),'Newington', 'Doune Castle'), 'Peckham Road South', 'Horn Hill'),'South East Bermondsey', 'Banefort'), 'East dulwich','Red Keep'), ' East Camberwell', 'South Park'), 'Car Parks', 'Springfield'), 'Borough', 'Hogwarts'), 'Lucas Gardens', 'Neverland'), 'Peckham West', 'Gotham City'), ' East Camberwell', 'Wonderland'), 'Dog Kennel Hill', 'Stars Hollow'), 'West Walworth', 'Jurassic Park'), 'Bermondsey', 'Atlantis'), 'South Walworth', 'Asgard'), 'The Grange', 'The Shire'), 'London Bridge', 'Emerald City'), 'Walworth','Hogsmeade'), 'Peckham','Mordor'), 'Camberwell','Lilliput')"
SQLQuery3="UPDATE Correspondence SET [CPZ Code] = CASE WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'A' THEN 'A1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'B' THEN 'A2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'C1' THEN 'C1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'C2' THEN 'C2' WHEN LEN([CPZ Code]) = 3 AND [CPZ Code] = 'CAM' THEN 'D1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'CP' THEN 'D2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'D' THEN 'E1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'E' THEN 'E2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'EC' THEN 'F1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'ED' THEN 'F2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'F' THEN 'G1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'G' THEN 'G2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'GR' THEN 'H1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'H' THEN 'H2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'HE' THEN 'I1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'HH' THEN 'I2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'J' THEN 'J1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'K' THEN 'J2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'L' THEN 'K1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'LG' THEN 'K2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'M1' THEN 'L1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'M2' THEN 'L2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'N' THEN 'M1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'NC' THEN 'M2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'P' THEN 'N1' WHEN LEN([CPZ Code]) = 3 AND [CPZ Code] = 'PCP' THEN 'N2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'PR' THEN 'O1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'PW' THEN 'O2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'Q' THEN 'P1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'R' THEN 'P2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'SB' THEN 'Q1' WHEN LEN([CPZ Code]) = 3 AND [CPZ Code] = 'SEB' THEN 'Q2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'T' THEN 'R1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'TS' THEN 'R2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'Z' THEN 'S1' ELSE [CPZ Code] END"
SQLQuery4="UPDATE Correspondence SET [Ticket #]=CONCAT('A3',STUFF([Ticket #],1,2,'')) "
sqlcon.execute(SQLQuery1)
sqlcon.execute(SQLQuery2)
sqlcon.execute(SQLQuery3)
sqlcon.execute(SQLQuery4)
The default __init__.py file looks like this:
import logging
import azure.function as func
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python HTTP trigger function processed a request')
name = req.params.get('name')
if not name:
try:
req_body = req.get_json()
except ValueError:
pass
else:
name = req_body.get('name')
if name:
return func.HttpResponse(f"Hello {name}!")
else:
return func.HttpResponse(
"Please pass a name in the query string or in the request body",
status_code=400
)
I pasted my code into the Azure function like so:
import logging
import azure.function as func
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python HTTP trigger function processed a request')
import sqlalchemy
import pandas as pd
sqlcon = sqlalchemy.create_engine('mssql://LBCUCRDBS100TL/AdventureWorks?driver=ODBC+Driver+17+for+SQL+Server')
path=r'C:\Users\H\Desktop\Correspondence\Correspondence_Received_with_Types.xlsx'
df=pd.read_excel(path)
df.to_sql('Correspondence',sqlcon,if_exists='append', index=False)
SQLQuery1="UPDATE Correspondence SET [Response By] = REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE (REPLACE(REPLACE([Response By], 'Junaid Dar', 'Robson de Souza') , 'Jade Buckingham','Rivaldo Vitor Borba Ferreira'), 'Danielle Wallace','Ronaldo de Assis Moreira'), 'Sadiya Nurmahomed','Wayne Mark Rooney'),'Tendai Giwa','Joseph John Cole'), 'Tiffany Heslop', 'Lionel Andrés Messi'), 'Gareth Budiar','Steven George Gerrard'),'Jeneen Nicholson','Samuel Eto''o Fils'), 'James Beaton','Carlos Alberto Tevez'), 'Kirsty Moore','Vincent Jean Mpoy Kompany'),'Kelly McDonald','Leroy Aziz Sané'), 'Muzaffer Mehmet','Gareth Frank Bale'), 'Charmaine Banerji','Ronaldo Luís Nazário de Lima'),'Paul Hunt','Luís Carlos Almeida de Cunha'), 'Jessica Ararat-David', 'Dimitar Ivanov Berbatov'), 'Anita Hayler','José María Gutiérrez Hernández'), 'Vishal Chandel','Diego Armando Maradona'), 'Saleema Panjwani','Edson Arantes do Nascimento'), 'Mohammed Uddin','Thierry Daniel Henry'),'Edward Ford','Nicolas Sébastien Anelka'), 'Gillian Sutherland', 'Zinedine Yazid Zidane'), 'Abdul Jimoh','Gabriel Omar Batistuta'),'Lee Woolward','Emmanuel Laurent Petit'), 'Antonia Akintaju','Robert Emmanuel Pires'),'Maria Dayang','Roberto Carlos da Silva Rocha'), 'Latisha McFarlane','David Robert Joseph Beckham'), 'Janine Townsend', 'Francesco Totti'), 'Vaughan Anderson-Moore','Alessandro Del Piero')"
SQLQuery2="UPDATE Correspondence SET [CPZ Name] = REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE (REPLACE(REPLACE([CPZ Name], 'North Camberwell', 'Dorne') , 'South Rotherhithe','King’s Landing'), 'Thorburn Square','Dragonstone'), 'All Non CPZ highway south of South Circular Road (A205)','Braavos'),'Housing Estates', 'Crownlands'), 'Parks Car Park', 'Winterfell'), 'South East Walworth', 'The Iron Islands'),'R - North Peckham','Casterly Rock'), 'CCTV Camera','Lannisport'),'Herne Hill', 'Volantis'),'South Camberwell', 'The Westerlands'),'South Bermondsey','Riverrun'), 'Rotherhithe', 'Highgarden'), ' Bankside','Storm’s End'), 'Trafalgar','The Kingsroad'), 'North Dulwich and Denmark Hill', 'Ashemark'), 'All Non CPZ highway north of South Circular Road (A205)','Blackmont'),'North Dulwich and Denmark Hill', 'Asshai'),'Newington', 'Doune Castle'), 'Peckham Road South', 'Horn Hill'),'South East Bermondsey', 'Banefort'), 'East dulwich','Red Keep'), ' East Camberwell', 'South Park'), 'Car Parks', 'Springfield'), 'Borough', 'Hogwarts'), 'Lucas Gardens', 'Neverland'), 'Peckham West', 'Gotham City'), ' East Camberwell', 'Wonderland'), 'Dog Kennel Hill', 'Stars Hollow'), 'West Walworth', 'Jurassic Park'), 'Bermondsey', 'Atlantis'), 'South Walworth', 'Asgard'), 'The Grange', 'The Shire'), 'London Bridge', 'Emerald City'), 'Walworth','Hogsmeade'), 'Peckham','Mordor'), 'Camberwell','Lilliput')"
SQLQuery3="UPDATE Correspondence SET [CPZ Code] = CASE WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'A' THEN 'A1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'B' THEN 'A2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'C1' THEN 'C1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'C2' THEN 'C2' WHEN LEN([CPZ Code]) = 3 AND [CPZ Code] = 'CAM' THEN 'D1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'CP' THEN 'D2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'D' THEN 'E1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'E' THEN 'E2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'EC' THEN 'F1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'ED' THEN 'F2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'F' THEN 'G1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'G' THEN 'G2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'GR' THEN 'H1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'H' THEN 'H2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'HE' THEN 'I1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'HH' THEN 'I2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'J' THEN 'J1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'K' THEN 'J2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'L' THEN 'K1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'LG' THEN 'K2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'M1' THEN 'L1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'M2' THEN 'L2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'N' THEN 'M1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'NC' THEN 'M2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'P' THEN 'N1' WHEN LEN([CPZ Code]) = 3 AND [CPZ Code] = 'PCP' THEN 'N2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'PR' THEN 'O1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'PW' THEN 'O2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'Q' THEN 'P1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'R' THEN 'P2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'SB' THEN 'Q1' WHEN LEN([CPZ Code]) = 3 AND [CPZ Code] = 'SEB' THEN 'Q2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'T' THEN 'R1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'TS' THEN 'R2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'Z' THEN 'S1' ELSE [CPZ Code] END"
SQLQuery4="UPDATE Correspondence SET [Ticket #]=CONCAT('A3',STUFF([Ticket #],1,2,'')) "
sqlcon.execute(SQLQuery1)
sqlcon.execute(SQLQuery2)
sqlcon.execute(SQLQuery3)
sqlcon.execute(SQLQuery4)
name = req.params.get('name')
if not name:
try:
req_body = req.get_json()
except ValueError:
pass
else:
name = req_body.get('name')
if name:
return func.HttpResponse(f"Hello {name}!")
else:
return func.HttpResponse(
"Please pass a name in the query string or in the request body",
status_code=400
)
When editing the code in Visual Studio Code I do not get an error, but when I call the Azure function nothing happens. I was expecting it to access the SQL database and update the data.
| [
"Based on your code, this might be because for the code you have used. Make sure you place all the import statements at the top and run the azure functions. Make sure you are actually sending the HTTP request by hitting the function and giving the name while calling the function as per your code. Below is how it should look like,\nimport logging\nimport sqlalchemy\nimport pandas as pd\nimport azure.function as func\n\ndef main(req: func.HttpRequest) -> func.HttpResponse:\n logging.info('Python HTTP trigger function processed a request')\n \n name = req.params.get('name')\n \n sqlcon = sqlalchemy.create_engine('mssql://LBCUCRDBS100TL/AdventureWorks?driver=ODBC+Driver+17+for+SQL+Server') \n\n path=r'C:\\Users\\H\\Desktop\\Correspondence\\Correspondence_Received_with_Types.xlsx'\n\n df=pd.read_excel(path)\n\n df.to_sql('Correspondence',sqlcon,if_exists='append', index=False)\n\n SQLQuery1=\"UPDATE Correspondence SET [Response By] = REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE (REPLACE(REPLACE([Response By], 'Junaid Dar', 'Robson de Souza') , 'Jade Buckingham','Rivaldo Vitor Borba Ferreira'), 'Danielle Wallace','Ronaldo de Assis Moreira'), 'Sadiya Nurmahomed','Wayne Mark Rooney'),'Tendai Giwa','Joseph John Cole'), 'Tiffany Heslop', 'Lionel Andrés Messi'), 'Gareth Budiar','Steven George Gerrard'),'Jeneen Nicholson','Samuel Eto''o Fils'), 'James Beaton','Carlos Alberto Tevez'), 'Kirsty Moore','Vincent Jean Mpoy Kompany'),'Kelly McDonald','Leroy Aziz Sané'), 'Muzaffer Mehmet','Gareth Frank Bale'), 'Charmaine Banerji','Ronaldo Luís Nazário de Lima'),'Paul Hunt','Luís Carlos Almeida de Cunha'), 'Jessica Ararat-David', 'Dimitar Ivanov Berbatov'), 'Anita Hayler','José María Gutiérrez Hernández'), 'Vishal Chandel','Diego Armando Maradona'), 'Saleema Panjwani','Edson Arantes do Nascimento'), 'Mohammed Uddin','Thierry Daniel Henry'),'Edward Ford','Nicolas Sébastien Anelka'), 'Gillian Sutherland', 'Zinedine Yazid Zidane'), 'Abdul Jimoh','Gabriel Omar Batistuta'),'Lee Woolward','Emmanuel Laurent Petit'), 'Antonia Akintaju','Robert Emmanuel Pires'),'Maria Dayang','Roberto Carlos da Silva Rocha'), 'Latisha McFarlane','David Robert Joseph Beckham'), 'Janine Townsend', 'Francesco Totti'), 'Vaughan Anderson-Moore','Alessandro Del Piero')\"\n SQLQuery2=\"UPDATE Correspondence SET [CPZ Name] = REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE (REPLACE(REPLACE([CPZ Name], 'North Camberwell', 'Dorne') , 'South Rotherhithe','King’s Landing'), 'Thorburn Square','Dragonstone'), 'All Non CPZ highway south of South Circular Road (A205)','Braavos'),'Housing Estates', 'Crownlands'), 'Parks Car Park', 'Winterfell'), 'South East Walworth', 'The Iron Islands'),'R - North Peckham','Casterly Rock'), 'CCTV Camera','Lannisport'),'Herne Hill', 'Volantis'),'South Camberwell', 'The Westerlands'),'South Bermondsey','Riverrun'), 'Rotherhithe', 'Highgarden'), ' Bankside','Storm’s End'), 'Trafalgar','The Kingsroad'), 'North Dulwich and Denmark Hill', 'Ashemark'), 'All Non CPZ highway north of South Circular Road (A205)','Blackmont'),'North Dulwich and Denmark Hill', 'Asshai'),'Newington', 'Doune Castle'), 'Peckham Road South', 'Horn Hill'),'South 
East Bermondsey', 'Banefort'), 'East dulwich','Red Keep'), ' East Camberwell', 'South Park'), 'Car Parks', 'Springfield'), 'Borough', 'Hogwarts'), 'Lucas Gardens', 'Neverland'), 'Peckham West', 'Gotham City'), ' East Camberwell', 'Wonderland'), 'Dog Kennel Hill', 'Stars Hollow'), 'West Walworth', 'Jurassic Park'), 'Bermondsey', 'Atlantis'), 'South Walworth', 'Asgard'), 'The Grange', 'The Shire'), 'London Bridge', 'Emerald City'), 'Walworth','Hogsmeade'), 'Peckham','Mordor'), 'Camberwell','Lilliput')\"\n SQLQuery3=\"UPDATE Correspondence SET [CPZ Code] = CASE WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'A' THEN 'A1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'B' THEN 'A2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'C1' THEN 'C1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'C2' THEN 'C2' WHEN LEN([CPZ Code]) = 3 AND [CPZ Code] = 'CAM' THEN 'D1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'CP' THEN 'D2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'D' THEN 'E1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'E' THEN 'E2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'EC' THEN 'F1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'ED' THEN 'F2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'F' THEN 'G1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'G' THEN 'G2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'GR' THEN 'H1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'H' THEN 'H2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'HE' THEN 'I1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'HH' THEN 'I2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'J' THEN 'J1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'K' THEN 'J2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'L' THEN 'K1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'LG' THEN 'K2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'M1' THEN 'L1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'M2' THEN 'L2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'N' THEN 'M1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'NC' THEN 'M2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'P' THEN 'N1' WHEN LEN([CPZ Code]) = 3 AND [CPZ Code] = 'PCP' THEN 'N2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'PR' THEN 'O1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'PW' THEN 'O2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'Q' THEN 'P1' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'R' THEN 'P2' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'SB' THEN 'Q1' WHEN LEN([CPZ Code]) = 3 AND [CPZ Code] = 'SEB' THEN 'Q2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'T' THEN 'R1' WHEN LEN([CPZ Code]) = 2 AND [CPZ Code] = 'TS' THEN 'R2' WHEN LEN([CPZ Code]) = 1 AND [CPZ Code] = 'Z' THEN 'S1' ELSE [CPZ Code] END\"\n SQLQuery4=\"UPDATE Correspondence SET [Ticket #]=CONCAT('A3',STUFF([Ticket #],1,2,'')) \"\n\n sqlcon.execute(SQLQuery1)\n sqlcon.execute(SQLQuery2)\n sqlcon.execute(SQLQuery3)\n sqlcon.execute(SQLQuery4)\n\n if not name:\n try:\n req_body = req.get_json()\n except ValueError:\n pass\n else:\n name = req_body.get('name')\n \n if name:\n return func.HttpResponse(f\"Hello {name}!\")\n else:\n return func.HttpResponse(\n \"Please pass a name in the query string or in the request body\",\n status_code=400\n )\n\nTo run azure function and make a call to your HTTP Function\n\nYou can use the below url to call your function.\nhttp://localhost:7071/api/HttpTrigger1?name=someName\n\nIf you are running through power automate make sure you pass a name to the function to get it called.\n"
] | [
0
] | [] | [] | [
"azure",
"azure_functions",
"function",
"python"
] | stackoverflow_0074359362_azure_azure_functions_function_python.txt |
Q:
Output ACM CNAME name + value via Cloudformation
I'm trying to deploy an ACM request and want to get the CNAME name and CNAME value that are shown in the table after the request is sent, all of this via CloudFormation outputs.
Is this even possible, or am I missing something?
If the CloudFormation resource looks as follows:
MyCertificate:
Type: "AWS::CertificateManager::Certificate"
Properties:
DomainName: "*.blstsecurity.com"
ValidationMethod: DNS
What would the Outputs section look like?
CertificateManager:
Description: Certificate manager CNAME output
Value: ???? ???????????
(I can't find any record-related attributes I can use in the Outputs.)
A:
There is no direct way, but you can use a Lambda-backed custom resource that takes the certificate ARN as input and returns the CNAME name and value as output.
import boto3
import json
import logging
import cfnresponse
def lambda_handler(event, context):
certificate_arn = event["ResourceProperties"]["CertificateArn"]
acm = boto3.client("acm")
    domain_validation_options = acm.describe_certificate(CertificateArn=certificate_arn)["Certificate"]["DomainValidationOptions"]
if event["RequestType"] == "Create":
cfnresponse.send(event, context, cfnresponse.SUCCESS, {"DomainValidationOptions": domain_validation_options})
elif event["RequestType"] == "Update":
cfnresponse.send(event, context, cfnresponse.SUCCESS, {"DomainValidationOptions": domain_validation_options})
elif event["RequestType"] == "Delete":
cfnresponse.send(event, context, cfnresponse.SUCCESS, {})
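To make the CNAME name and value easy to reference from the template's Outputs (custom resource Data keys can be read with Fn::GetAtt), it may help to return them as flat strings rather than the whole list. Below is a minimal helper sketch, assuming DNS validation with a single domain and that ACM has already populated the ResourceRecord:
import boto3

def validation_cname(certificate_arn):
    # Returns the DNS validation CNAME name/value for the first domain,
    # or empty strings if ACM has not populated the record yet.
    acm = boto3.client("acm")
    cert = acm.describe_certificate(CertificateArn=certificate_arn)["Certificate"]
    options = cert.get("DomainValidationOptions", [])
    record = options[0].get("ResourceRecord", {}) if options else {}
    return {"CnameName": record.get("Name", ""), "CnameValue": record.get("Value", "")}

The handler above could pass this dictionary as the Data argument to cfnresponse.send, after which the template can output the values with Fn::GetAtt on the custom resource.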
| Output ACM CNAME name + value via Cloudformation | I'm trying to deploy an ACM request and want to get the CNAME name and CNAME value that are shown in the table after the request is sent, all of this via CloudFormation outputs.
Is this even possible, or am I missing something?
If the CloudFormation resource looks as follows:
MyCertificate:
Type: "AWS::CertificateManager::Certificate"
Properties:
DomainName: "*.blstsecurity.com"
ValidationMethod: DNS
What would the Outputs section look like?
CertificateManager:
Description: Certificate manager CNAME output
Value: ???? ???????????
(I can't find any record-related attributes I can use in the Outputs.)
| [
"There is no direct way, although you can make use of CustomResource that takes in the certificate ARN as input and returns the CNAME name and value as output.\nimport boto3\nimport json\nimport logging\nimport cfnresponse\n\ndef lambda_handler(event, context):\n certificate_arn = event[\"ResourceProperties\"][\"CertificateArn\"]\n acm = boto3.client(\"acm\")\n domain_validation_options = acm.describe_certificate(CertificateArn=certificate_arn)[\"DomainValidationOptions\"]\n\n if event[\"RequestType\"] == \"Create\":\n cfnresponse.send(event, context, cfnresponse.SUCCESS, {\"DomainValidationOptions\": domain_validation_options})\n elif event[\"RequestType\"] == \"Update\":\n cfnresponse.send(event, context, cfnresponse.SUCCESS, {\"DomainValidationOptions\": domain_validation_options})\n elif event[\"RequestType\"] == \"Delete\":\n cfnresponse.send(event, context, cfnresponse.SUCCESS, {})\n\n"
] | [
0
] | [] | [] | [
"amazon_cloudformation",
"amazon_web_services",
"aws_certificate_manager"
] | stackoverflow_0074675132_amazon_cloudformation_amazon_web_services_aws_certificate_manager.txt |
Q:
Flutter navigation drawer hamburger icon color change
The hamburger icon color of the navigation drawer is not changing; it's black by default. I want to change this icon color in Flutter but I am stuck. Please help me change this icon color. Here is my code.
class Test extends StatefulWidget {
@override
_TestState createState() => new _TestState();
}
class _TestState extends State<Test> {
@override
Widget build(BuildContext context) {
return new Scaffold(
drawer: new Drawer(),
appBar: new AppBar(
title: new Text("Navigation Drawer")
),
);
}
}
A:
Add iconTheme to your AppBar
@override
Widget build(BuildContext context) {
return Scaffold(
drawer: Drawer(),
appBar: AppBar(
title: Text("Navigation Drawer"),
iconTheme: IconThemeData(color: Colors.green),
),
);
}
You can also check other solutions here.
A:
You can also use the following in the Theme's data property:
Theme(
data: ThemeData(primaryIconTheme: IconThemeData(color: Colors.red)), // use this
child: Scaffold(),
)
Or
appBar: AppBar(
leading: IconButton(
icon: Icon(Icons.menu, color: Colors.red), // set your color here
onPressed: () {},
),
),
A:
To change the color of your icon, use this:
@override
Widget build(BuildContext context) {
return new MaterialApp(
home: new Scaffold(
appBar: AppBar(title: new Text('List view example'),
leading: new Icon(Icons.menu,color: Colors.green,),
),
),
);
}
Icon(Icons.menu, color: Colors.green) defines the color inside the Icon itself.
A:
Use iconTheme in Appbar like this:
Widget build(BuildContext context) {
return Scaffold(
appBar: AppBar(
title: Text("App Bar"),
iconTheme: IconThemeData(color: Colors.black),
),
drawer: Drawer(),
);
}
A:
This is the only solution that keeps the button clickable (it still opens the drawer); otherwise you need to call openDrawer yourself in an onTap handler.
AppBar(
iconTheme: const IconThemeData(
size: 40, //change size on your need
color: Colors.black, //change color on your need
),
),
A:
Using the iconTheme for the AppBar does not currently work with useMaterial3 = true, and the answers above define a leading icon for the AppBar without explaining how to implement its onPressed behavior. So the best way to change the drawer's icon or its color is this:
Declare a key for the Scaffold:
final scaffoldKey = GlobalKey<ScaffoldState>();
And apply it to Scaffold:
Scaffold(
key: scaffoldKey,
drawer: Drawer()
)
Then apply the drawer icon as below, with a click action:
AppBar(
title: Text("My AppBar"),
leading: IconButton(
icon: Icon(Icons.person),
onPressed: (){
if(scaffoldKey.currentState!.isDrawerOpen){
scaffoldKey.currentState!.closeDrawer();
//close drawer, if drawer is open
}else{
scaffoldKey.currentState!.openDrawer();
//open drawer, if drawer is closed
}
},
),
)
A:
You can change it from main.dart easily this way:
return MaterialApp(
title: 'XYZ',
debugShowCheckedModeBanner: false,
theme: ThemeData(
appBarTheme: AppBarTheme(
iconTheme: IconThemeData(color: Colors.black),
actionsIconTheme: IconThemeData(color: Colors.blue),
backgroundColor: theme.backgroundColor,
elevation: 0,
),
),
| Flutter navigation drawer hamburger icon color change | The hamburger icon color of the navigation drawer is not changing; it's black by default. I want to change this icon color in Flutter but I am stuck. Please help me change this icon color. Here is my code.
class Test extends StatefulWidget {
@override
_TestState createState() => new _TestState();
}
class _TestState extends State<Test> {
@override
Widget build(BuildContext context) {
return new Scaffold(
drawer: new Drawer(),
appBar: new AppBar(
title: new Text("Navigation Drawer")
),
);
}
}
| [
"Add iconTheme to your AppBar\n@override\nWidget build(BuildContext context) {\n return Scaffold(\n drawer: Drawer(),\n appBar: AppBar(\n title: Text(\"Navigation Drawer\"),\n iconTheme: IconThemeData(color: Colors.green),\n ),\n );\n}\n\nYou can also check other solutions here.\n",
"You can also use following in Theme's data property\nTheme(\n data: ThemeData(primaryIconTheme: IconThemeData(color: Colors.red)), // use this\n child: Scaffold(),\n)\n\nOr\nappBar: AppBar(\n leading: IconButton(\n icon: Icon(Icons.menu, color: Colors.red), // set your color here\n onPressed: () {},\n ),\n),\n\n",
"\nTo change color of your icon use this\n\n @override\n Widget build(BuildContext context) {\n return new MaterialApp(\n home: new Scaffold(\n appBar: AppBar(title: new Text('List view example'),\n leading: new Icon(Icons.menu,color: Colors.green,),\n ),\n),\n );\n }\n\n\nIcon(Icons.menu,color: Colors.green,) define color inside Icon\n\n",
"Use iconTheme in Appbar like this:\nWidget build(BuildContext context) {\n return Scaffold(\n appBar: AppBar(\n title: Text(\"App Bar\"),\n iconTheme: IconThemeData(color: Colors.black),\n ),\n drawer: Drawer(),\n );\n}\n\n",
"This is the only solution to make the button clickable otherwise you need to openDrawer onTap.\nAppBar(\n iconTheme: const IconThemeData(\n size: 40, //change size on your need\n color: Colors.black, //change color on your need\n ),\n ),\n\n",
"Using The iconTheme for Appbar is not currently working with useMaterial3 = true, And all these answers defined a leading icon for the Appbar without telling how to implement it's onPress behavior, So the best way to change the Drawers icon or it's color is this :\nDeclare the key for Scaffold :\nfinal scaffoldKey = GlobalKey<ScaffoldState>();\n\nAnd apply it to Scaffold:\nScaffold(\n key: scaffoldKey,\n drawer: Drawer()\n)\n\nThen , Apply the drawer icon like below with click action:\nAppBar(\n title: Text(\"My AppBar\"),\n leading: IconButton(\n icon: Icon(Icons.person),\n onPressed: (){\n if(scaffoldKey.currentState!.isDrawerOpen){\n scaffoldKey.currentState!.closeDrawer();\n //close drawer, if drawer is open\n }else{\n scaffoldKey.currentState!.openDrawer();\n //open drawer, if drawer is closed\n }\n },\n ),\n)\n\n",
"You can change it from main.dart easily this way-\nreturn MaterialApp(\n title: 'XYZ',\n debugShowCheckedModeBanner: false,\n theme: ThemeData(\n appBarTheme: AppBarTheme(\n iconTheme: IconThemeData(color: Colors.black),\n actionsIconTheme: IconThemeData(color: Colors.blue),\n backgroundColor: theme.backgroundColor,\n elevation: 0,\n ),\n\n ),\n\n"
] | [
204,
14,
5,
3,
2,
1,
0
] | [] | [] | [
"flutter",
"flutter_layout",
"navigation_drawer"
] | stackoverflow_0050580234_flutter_flutter_layout_navigation_drawer.txt |
Q:
Move 3d plot on the xy plane
I am trying to plot my data, but my 3D plot is out of bounds, meaning it's above the z-axis 0 point. I want it to be on the xy plane, meaning an offset of -160. Is there a way of adding an offset? (Please check MyImage to visualise what I am trying to do.)
My code:
ax = plt.figure().add_subplot(projection='3d')
ax.set(xlim=(-3, 2), ylim=(0.25, 2), zlim=(-160, 0), xlabel='CV1', ylabel='CV2', zlabel='free energy (kJ/mol)')
ax.plot_surface(xvals, yvals, zvals, edgecolor='royalblue', lw=0.8, rstride=1, cstride=1,alpha=0.3, cmap= 'plasma')
ax.contour(xvals, yvals, zvals, zdir='z', offset= -160, cmap='plasma')
plt.savefig('myplot.png')
plt.show()
(The x_vals, y_vals, z_vals are reshaped ndarrays; their shape is (101, 101).)
MyImage
I searched the documentation of the plot_surface function but could not find a way to add some offset.
A:
plot_surface() in Matplotlib does not have an offset parameter, so the usual way to move the surface is to shift the z data itself before plotting. Subtracting 160 from zvals moves the whole surface down so it sits within your z-limits, on top of the contour plane at z = -160:
ax = plt.figure().add_subplot(projection='3d')
ax.set(xlim=(-3, 2), ylim=(0.25, 2), zlim=(-160, 0), xlabel='CV1', ylabel='CV2', zlabel='free energy (kJ/mol)')

# Shift the surface down by 160 so it lies inside the z-limits
ax.plot_surface(xvals, yvals, zvals - 160, edgecolor='royalblue', lw=0.8, rstride=1, cstride=1, alpha=0.3, cmap= 'plasma')

ax.contour(xvals, yvals, zvals, zdir='z', offset= -160, cmap='plasma')
plt.savefig('myplot.png')
plt.show()
Note that the z tick labels will now show the shifted values; relabel the z axis (or shift the ticks back) if you need the original free-energy scale.
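If the goal is for the lowest point of the surface to land exactly on the contour plane rather than shifting by a fixed 160, one variant (assuming xvals, yvals, zvals are the (101, 101) arrays from the question) is:
shifted = zvals - zvals.min() - 160   # the surface minimum sits exactly at z = -160

ax = plt.figure().add_subplot(projection='3d')
ax.set(xlim=(-3, 2), ylim=(0.25, 2), zlim=(-160, 0), xlabel='CV1', ylabel='CV2', zlabel='free energy (kJ/mol)')
ax.plot_surface(xvals, yvals, shifted, edgecolor='royalblue', lw=0.8, rstride=1, cstride=1, alpha=0.3, cmap='plasma')
ax.contour(xvals, yvals, shifted, zdir='z', offset=-160, cmap='plasma')
plt.show()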
| Move 3d plot on the xy plane | I am trying to plot my data, but my 3D plot is out of bounds, meaning it's above the z-axis 0 point. I want it to be on the xy plane, meaning an offset of -160. Is there a way of adding an offset? (Please check MyImage to visualise what I am trying to do.)
My code:
ax = plt.figure().add_subplot(projection='3d')
ax.set(xlim=(-3, 2), ylim=(0.25, 2), zlim=(-160, 0), xlabel='CV1', ylabel='CV2', zlabel='free energy (kJ/mol)')
ax.plot_surface(xvals, yvals, zvals, edgecolor='royalblue', lw=0.8, rstride=1, cstride=1,alpha=0.3, cmap= 'plasma')
ax.contour(xvals, yvals, zvals, zdir='z', offset= -160, cmap='plasma')
plt.savefig('myplot.png')
plt.show()
(The x_vals, y_vals, z_vals are reshaped ndarrays; their shape is (101, 101).)
MyImage
I searched the documentation of the plot_surface function but could not find a way to add some offset.
| [
"To add an offset to your 3D plot in Matplotlib, you can use the zoffset parameter of the plot_surface() function. This parameter specifies the z-coordinate at which the surface is drawn.\nHere's an example of how you can use the zoffset parameter to set the offset of your plot:\nax = plt.figure().add_subplot(projection='3d')\nax.set(xlim=(-3, 2), ylim=(0.25, 2), zlim=(-160, 0), xlabel='CV1', ylabel='CV2', zlabel='free energy (kJ/mol)')\n\n# Create a Poly3DCollection object for the surface plot\nsurface = ax.plot_surface(xvals, yvals, zvals, edgecolor='royalblue', lw=0.8, rstride=1, cstride=1, alpha=0.3, cmap= 'plasma')\n\n# Use the set_zoffset() method to set the z-offset of the surface plot\nsurface.set_zoffset(-160)\n\nax.contour(xvals, yvals, zvals, zdir='z', offset= -160, cmap='plasma')\nplt.savefig('myplot.png')\nplt.show()\n\nAlternatively, you can also use the offset_z parameter of the plot_surface() function to set the z-offset of the surface plot.\nax = plt.figure().add_subplot(projection='3d')\nax.set(xlim=(-3, 2), ylim=(0.25, 2), zlim=(-160, 0), xlabel='CV1', ylabel='CV2', zlabel='free energy (kJ/mol)')\n\n# Use the offset_z parameter to set the z-offset of the surface plot\nax.plot_surface(xvals, yvals, zvals, edgecolor='royalblue', lw=0.8, rstride=1, cstride=1, alpha=0.3, cmap= 'plasma', offset_z=-160)\n\nax.contour(xvals, yvals, zvals, zdir='z', offset= -160, cmap='plasma')\nplt.savefig('myplot.png')\nplt.show()\n\n"
] | [
0
] | [] | [] | [
"3d",
"matplotlib",
"move",
"plot",
"python"
] | stackoverflow_0074675441_3d_matplotlib_move_plot_python.txt |
Q:
Simplify if conditions in python
Is there a neat way to simplify this if statement?
I need n to be >= 2 and <= 100 and value to increase by one for every seventh step (except the first one which should be between 2 and 7 and last one which should be between 98 and 100).
if n >= 2 and n <= 7:
value = 1
elif n > 7 and n <= 14:
value = 2
elif n > 14 and n <= 21:
value = 3
elif n > 21 and n <= 28:
value = 4
elif n > 28 and n <= 35:
value = 5
elif n > 35 and n <= 42:
value = 6
elif n > 42 and n <= 49:
value = 7
elif n > 49 and n <= 56:
value = 8
elif n > 56 and n <= 63:
value = 9
elif n > 63 and n <= 70:
value = 10
elif n > 70 and n <= 77:
value = 11
elif n > 77 and n <= 84:
value = 12
elif n > 84 and n <= 91:
value = 13
elif n > 91 and n <= 98:
value = 14
elif n > 98 and n <= 100:
value = 15
EDIT:
Furthermore I'm having trouble finding the smallest number possible from the following question:
Matchsticks are ideal tools to represent numbers. A common way to represent the ten decimal digits with matchsticks is the following:
This is identical to how numbers are displayed on an ordinary alarm clock. With a given number of matchsticks you can generate a wide range of numbers. We are wondering what the smallest and largest numbers are that can be created by using all your matchsticks.
Input:
On the first line one positive number: the number of testcases, at most 100. After that per testcase:
One line with an integer n (2 <= n <= 100): the number of matchsticks you have.
Output:
Per testcase:
One line with the smallest and largest numbers you can create, separated by a single space. Both numbers should be positive and contain no leading zeroes.
I've tried multiple different ways to try to solve this problem, I'm currently trying to:
find the minimal number of digits (value) for the smallest number:
#Swifty
def values(n):
if 2 <= n <= 100:
value = (n + 6) // 7
minimum(n, value)
Now I want to send value to a function minimum(), which should generate all the different combinations of numbers that are value digits long. I want to store all these numbers in a list and then take min() to get the smallest one. I'm not getting this part to work and would appreciate some inspiration.
Something to remember is that the number can't start with a 0.
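A brute-force sketch of the minimum() idea described above, assuming a seven-segment stick count per digit (the STICKS table below): it enumerates every value-digit number, keeps those that use exactly n matchsticks and have no leading zero, and takes the smallest. It is exponential in the number of digits, so it is only practical for small inputs:
from itertools import product

STICKS = {'0': 6, '1': 2, '2': 5, '3': 5, '4': 4, '5': 5, '6': 6, '7': 3, '8': 7, '9': 6}

def minimum(n, value):
    candidates = []
    for digits in product('0123456789', repeat=value):
        number = ''.join(digits)
        if number[0] != '0' and sum(STICKS[d] for d in number) == n:
            candidates.append(int(number))
    return min(candidates) if candidates else None

# minimum(13, (13 + 6) // 7) -> 68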
A:
if 2 <= n <= 100:
value = (n+6)//7
Unless I'm mistaken, this should work for part 2:
numdict = {2:1, 3:7, 4:4, 5:2, 6:6, 7:8, 8:10, 9:18, 10:22, 11:20, 12:28, 13:68}  # 6 sticks -> 6, since 0 is not a positive number
def min_stick(n):
if 2 <= n <= 13:
return numdict[n]
digits = (n + 6) // 7
base = str(numdict[7+n%7])
return int(base+"8"*(digits - len(base)))
And, though this one's a no-brainer:
def max_stick(n):
if n%2:
return int("7"+"1"*((n-3)//2))
return int("1"*(n//2))
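A small driver for the input/output format described in the problem statement, assuming the min_stick and max_stick helpers above:
def solve():
    t = int(input())
    for _ in range(t):
        n = int(input())
        print(min_stick(n), max_stick(n))

# solve()  # e.g. a testcase with n = 14 prints "88 1111111"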
A:
Here is one way to simplify the if statement you provided in Python:
value = (n + 6) // 7 if 2 <= n <= 100 else None
This code uses integer division (the "//" operator) to take the ceiling of n divided by 7, which gives the same result as the original if statement for every n from 2 to 100. Outside that range it sets "value" to None.
Here is an example of how this code would work:
# n = 14
value = (14 + 6) // 7  # This evaluates to 2
# n = 100
value = (100 + 6) // 7  # This evaluates to 15
# n = 101
value = None  # n is outside the 2..100 range, so the conditional falls back to None
I hope this helps! Let me know if you have any other questions.
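A quick spot-check of the formula at the boundary values of the original if/elif chain:
for n in (2, 7, 8, 14, 15, 98, 99, 100):
    print(n, (n + 6) // 7)   # prints 1, 1, 2, 2, 3, 14, 15, 15 respectively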
| Simplify if conditions in python | Is there a neat way to simplify this if statement?
I need n to be >= 2 and <= 100 and value to increase by one for every seventh step (except the first one which should be between 2 and 7 and last one which should be between 98 and 100).
if n >= 2 and n <= 7:
value = 1
elif n > 7 and n <= 14:
value = 2
elif n > 14 and n <= 21:
value = 3
elif n > 21 and n <= 28:
value = 4
elif n > 28 and n <= 35:
value = 5
elif n > 35 and n <= 42:
value = 6
elif n > 42 and n <= 49:
value = 7
elif n > 49 and n <= 56:
value = 8
elif n > 56 and n <= 63:
value = 9
elif n > 63 and n <= 70:
value = 10
elif n > 70 and n <= 77:
value = 11
elif n > 77 and n <= 84:
value = 12
elif n > 84 and n <= 91:
value = 13
elif n > 91 and n <= 98:
value = 14
elif n > 98 and n <= 100:
value = 15
EDIT:
Furthermore I'm having trouble finding the smallest number possible from the following question:
Matchsticks are ideal tools to represent numbers. A common way to represent the ten decimal digits with matchsticks is the following:
This is identical to how numbers are displayed on an ordinary alarm clock. With a given number of matchsticks you can generate a wide range of numbers. We are wondering what the smallest and largest numbers are that can be created by using all your matchsticks.
Input:
On the first line one positive number: the number of testcases, at most 100. After that per testcase:
One line with an integer n (2 <= n <= 100): the number of matchsticks you have.
Output:
Per testcase:
One line with the smallest and largest numbers you can create, separated by a single space. Both numbers should be positive and contain no leading zeroes.
I've tried multiple different ways to try to solve this problem, I'm currently trying to:
find the minimal number of digits (value) for the smallest number:
#Swifty
def values(n):
if 2 <= n <= 100:
value = (n + 6) // 7
minimum(n, value)
Now I want to send value to a function minimum(), which should generate all the different combinations of numbers that are value digits long. I want to store all these numbers in a list and then take min() to get the smallest one. I'm not getting this part to work and would appreciate some inspiration.
Something to remember is that the number can't start with a 0.
| [
"if 2 <= n <= 100:\n value = (n+6)//7\n\nUnless I'm mistaken, this should work for part 2:\nnumdict = {2:1,3:7,4:4,5:2,6:0,7:8,8:10,9:18,10:22,11:20,12:28,13:68}\n\ndef min_stick(n):\n if 2 <= n <= 13:\n return numdict[n]\n digits = (n + 6) // 7\n base = str(numdict[7+n%7])\n return int(base+\"8\"*(digits - len(base)))\n\nAnd, though this one's a no-brainer:\ndef max_stick(n):\n if n%2:\n return int(\"7\"+\"1\"*((n-3)//2))\n return int(\"1\"*(n//2))\n\n",
"Here is one way to simplify the if statement you provided in Python:\nvalue = (n // 7) + 1 if 2 <= n <= 100 else None\n\nThis code uses integer division (the \"//\" operator) to divide the value of \"n\" by 7 and add 1 to the result. If the value of \"n\" is less than or equal to 100, this will give the same result as the original if statement. Otherwise, it will set \"value\" to None.\nHere is an example of how this code would work:\n# n = 14\nvalue = (14 // 7) + 1 # This evaluates to 2\n\n# n = 100\nvalue = (100 // 7) + 1 # This evaluates to 15\n\n# n = 101\nvalue = (101 // 7) + 1 # This evaluates to None\n\nI hope this helps! Let me know if you have any other questions.\n"
] | [
1,
0
] | [] | [] | [
"if_statement",
"python",
"python_3.x"
] | stackoverflow_0074674910_if_statement_python_python_3.x.txt |
Q:
DiscordAPIError[40060]: Interaction has already been acknowledged. with buttons interactions discord.js V14
I'm getting this error on all my buttons.
Sometimes it does what is in the code; sometimes the error comes early:
DiscordAPIError[40060]: Interaction has already been acknowledged.
at SequentialHandler.runRequest (C:\Users\ryuuo\Desktop\ryuu\node_modules\@discordjs\rest\dist\index.js:659:15)
at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
at async SequentialHandler.queueRequest (C:\Users\ryuuo\Desktop\ryuu\node_modules\@discordjs\rest\dist\index.js:458:14)
at async REST.request (C:\Users\ryuuo\Desktop\ryuu\node_modules\@discordjs\rest\dist\index.js:902:22)
at async ButtonInteraction.deferReply (C:\Users\ryuuo\Desktop\ryuu\node_modules\discord.js\src\structures\interfaces\InteractionResponses.js:69:5)
at async Object.execute (C:\Users\ryuuo\Desktop\ryuu\Buttons\languague.js:11:5) {
requestBody: { files: undefined, json: { type: 5, data: { flags: 64 } } },
rawError: {
message: 'Interaction has already been acknowledged.',
code: 40060
},
code: 40060,
status: 400,
method: 'POST',
url: 'https://discord.com/api/v10/interactions/1037980437043236884/aW50ZXJhY3Rpb246MTAzNzk4MDQzNzA0MzIzNjg4NDpteWUxWkVidG8xY1VsUkpOU3Jxb3JJSnp2b2JwSlFtUUVqVnFEb0FNeEg4UUZqc1JNMWdyMkYxU3JNMDdPR2RDMEV2U3Q0TzByMkMzY1lQTk1kS09SVHh5SjEwR1daOHF6OVhWbEh0bDJYd1B6UFpIYXdMYktwczF3RXRxNVFFTA/callback'
}
I'm using:
Discord.js Version 14.6.0
Node.js Version 18.11.0
I tried with a simple reply like
client.on(Events.InteractionCreate, async(interaction) => {
if(!interaction.isButton()) return;
if(interaction.customId === "test")
await interaction.reply({
content: "Test worked",
ephemeral: true
});
});
and I get the same error after the bot replies.
I also tried with a button handler and got the same result.
A:
I solved it. It seems the problem was in the handler; I edited the interaction filter from this:
if (!interaction.isChatInputCommand) return;
to this
if (!interaction.isChatInputCommand ||
interaction.isButton() ||
interaction.isModalSubmit()) return;
and it works fine
| DiscordAPIError[40060]: Interaction has already been acknowledged. with buttons interactions discord.js V14 | I'm getting this error on all my buttons.
Sometimes it does what is in the code; sometimes the error comes early:
DiscordAPIError[40060]: Interaction has already been acknowledged.
at SequentialHandler.runRequest (C:\Users\ryuuo\Desktop\ryuu\node_modules\@discordjs\rest\dist\index.js:659:15)
at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
at async SequentialHandler.queueRequest (C:\Users\ryuuo\Desktop\ryuu\node_modules\@discordjs\rest\dist\index.js:458:14)
at async REST.request (C:\Users\ryuuo\Desktop\ryuu\node_modules\@discordjs\rest\dist\index.js:902:22)
at async ButtonInteraction.deferReply (C:\Users\ryuuo\Desktop\ryuu\node_modules\discord.js\src\structures\interfaces\InteractionResponses.js:69:5)
at async Object.execute (C:\Users\ryuuo\Desktop\ryuu\Buttons\languague.js:11:5) {
requestBody: { files: undefined, json: { type: 5, data: { flags: 64 } } },
rawError: {
message: 'Interaction has already been acknowledged.',
code: 40060
},
code: 40060,
status: 400,
method: 'POST',
url: 'https://discord.com/api/v10/interactions/1037980437043236884/aW50ZXJhY3Rpb246MTAzNzk4MDQzNzA0MzIzNjg4NDpteWUxWkVidG8xY1VsUkpOU3Jxb3JJSnp2b2JwSlFtUUVqVnFEb0FNeEg4UUZqc1JNMWdyMkYxU3JNMDdPR2RDMEV2U3Q0TzByMkMzY1lQTk1kS09SVHh5SjEwR1daOHF6OVhWbEh0bDJYd1B6UFpIYXdMYktwczF3RXRxNVFFTA/callback'
}
I'm using:
Discord.js Version 14.6.0
Node.js Version 18.11.0
I tried with a simple reply like
client.on(Events.InteractionCreate, async(interaction) => {
if(!interaction.isButton()) return;
if(interaction.customId === "test")
await interaction.reply({
content: "Test worked",
ephemeral: true
});
});
and I get the same error after the bot replies.
I also tried with a button handler and got the same result.
| [
"I solved it it seems that the problem was from the handler I edited the interaction filter from this\n if (!interaction.isChatInputCommand) return;\n\nto this\n if (!interaction.isChatInputCommand || \n interaction.isButton() ||\n interaction.isModalSubmit()) return;\n\nand it works fine\n"
] | [
0
] | [
"Check if you have duplicated events, and maybe turn if(interaction.customId === \"test\") into if(interaction.customId !== \"test\") return, that should work.\n"
] | [
-1
] | [
"discord",
"discord.js",
"discord_buttons",
"javascript"
] | stackoverflow_0074313242_discord_discord.js_discord_buttons_javascript.txt |