Bug 1302401 - This check converts for(...; ...; ...) loops to use the new range-based loops in C++11.
MozReview-Commit-ID: 6nhX0uXNC13
--- a/js/src/gc/Barrier.h
+++ b/js/src/gc/Barrier.h
@@ -511,17 +511,17 @@ class HeapPtr : public WriteBarrieredBas
}
/*
* For HeapPtr, move semantics are equivalent to copy semantics. In
* C++, a copy constructor taking const-ref is the way to get a single
* function that will be used for both lvalue and rvalue copies, so we can
* simply omit the rvalue variant.
*/
- MOZ_IMPLICIT HeapPtr(HeapPtr<T> v) : WriteBarrieredBase<T>(std::move(v)) {
+ MOZ_IMPLICIT HeapPtr(const HeapPtr<T>& v) : WriteBarrieredBase<T>(v) {
this->post(JS::GCPolicy<T>::initial(), this->value);
}
~HeapPtr() {
this->pre();
this->post(this->value, JS::GCPolicy<T>::initial());
}
--- a/js/src/perf/pm_linux.cpp
+++ b/js/src/perf/pm_linux.cpp
@@ -116,18 +116,18 @@ Impl::Impl()
{
}
Impl::~Impl()
{
// Close all active counter descriptors. Take care to do the group
// leader last (this may not be necessary, but it's unclear what
// happens if you close the group leader out from under a group).
- for (int i = 0; i < PerfMeasurement::NUM_MEASURABLE_EVENTS; i++) {
- int fd = this->*(kSlots[i].fd);
+ for (const auto & kSlot : kSlots) {
+ int fd = this->*(kSlot.fd);
if (fd != -1 && fd != group_leader)
close(fd);
}
if (group_leader != -1)
close(group_leader);
}
@@ -135,28 +135,28 @@ EventMask
Impl::init(EventMask toMeasure)
{
MOZ_ASSERT(group_leader == -1);
if (!toMeasure)
return EventMask(0);
EventMask measured = EventMask(0);
struct perf_event_attr attr;
- for (int i = 0; i < PerfMeasurement::NUM_MEASURABLE_EVENTS; i++) {
- if (!(toMeasure & kSlots[i].bit))
+ for (const auto & kSlot : kSlots) {
+ if (!(toMeasure & kSlot.bit))
continue;
memset(&attr, 0, sizeof(attr));
attr.size = sizeof(attr);
// Set the type and config fields to indicate the counter we
// want to enable. We want read format 0, and we're not using
// sampling, so leave those fields unset.
- attr.type = kSlots[i].type;
- attr.config = kSlots[i].config;
+ attr.type = kSlot.type;
+ attr.config = kSlot.config;
// If this will be the group leader it should start off
// disabled. Otherwise it should start off enabled (but blocked
// on the group leader).
if (group_leader == -1)
attr.disabled = 1;
// The rest of the bit fields are really poorly documented.
@@ -170,18 +170,18 @@ Impl::init(EventMask toMeasure)
int fd = sys_perf_event_open(&attr,
0 /* trace self */,
-1 /* on any cpu */,
group_leader,
0 /* no flags presently defined */);
if (fd == -1)
continue;
- measured = EventMask(measured | kSlots[i].bit);
- this->*(kSlots[i].fd) = fd;
+ measured = EventMask(measured | kSlot.bit);
+ this->*(kSlot.fd) = fd;
if (group_leader == -1)
group_leader = fd;
}
return measured;
}
void
Impl::start()
@@ -202,25 +202,25 @@ Impl::stop(PerfMeasurement* counters)
if (!running || group_leader == -1)
return;
ioctl(group_leader, PERF_EVENT_IOC_DISABLE, 0);
running = false;
// read out and reset all the counter values
- for (int i = 0; i < PerfMeasurement::NUM_MEASURABLE_EVENTS; i++) {
- int fd = this->*(kSlots[i].fd);
+ for (const auto & kSlot : kSlots) {
+ int fd = this->*(kSlot.fd);
if (fd == -1)
continue;
if (read(fd, buf, sizeof(buf)) == sizeof(uint64_t)) {
uint64_t cur;
memcpy(&cur, buf, sizeof(uint64_t));
- counters->*(kSlots[i].counter) += cur;
+ counters->*(kSlot.counter) += cur;
}
// Reset the counter regardless of whether the read did what
// we expected.
ioctl(fd, PERF_EVENT_IOC_RESET, 0);
}
}
@@ -268,21 +268,21 @@ PerfMeasurement::stop()
{
if (impl)
static_cast<Impl*>(impl)->stop(this);
}
void
PerfMeasurement::reset()
{
- for (int i = 0; i < NUM_MEASURABLE_EVENTS; i++) {
- if (eventsMeasured & kSlots[i].bit)
- this->*(kSlots[i].counter) = 0;
+ for (const auto & kSlot : kSlots) {
+ if (eventsMeasured & kSlot.bit)
+ this->*(kSlot.counter) = 0;
else
- this->*(kSlots[i].counter) = -1;
+ this->*(kSlot.counter) = -1;
}
}
bool
PerfMeasurement::canMeasureSomething()
{
// Find out if the kernel implements the performance measurement
// API. If it doesn't, syscall(__NR_perf_event_open, ...) is