diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 2ddbce4cce144f62e8fed9dd39209fb3429b82a7..0c08062d1796337f25b9dbc2cbce68d5e3194037 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -97,6 +97,43 @@ gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
 }
 EXPORT_SYMBOL(gnet_stats_start_copy);
 
+static void
+__gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
+                           struct gnet_stats_basic_cpu __percpu *cpu)
+{
+       int i;
+
+       for_each_possible_cpu(i) {
+               struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
+               unsigned int start;
+               u64 bytes;
+               u32 packets;
+
+               do {
+                       start = u64_stats_fetch_begin_irq(&bcpu->syncp);
+                       bytes = bcpu->bstats.bytes;
+                       packets = bcpu->bstats.packets;
+               } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));
+
+               bstats->bytes += bytes;
+               bstats->packets += packets;
+       }
+}
+
+void
+__gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats,
+                       struct gnet_stats_basic_cpu __percpu *cpu,
+                       struct gnet_stats_basic_packed *b)
+{
+       if (cpu) {
+               __gnet_stats_copy_basic_cpu(bstats, cpu);
+       } else {
+               bstats->bytes = b->bytes;
+               bstats->packets = b->packets;
+       }
+}
+EXPORT_SYMBOL(__gnet_stats_copy_basic);
+
 /**
  * gnet_stats_copy_basic - copy basic statistics into statistic TLV
  * @d: dumping handle
@@ -109,19 +146,25 @@ EXPORT_SYMBOL(gnet_stats_start_copy);
  * if the room in the socket buffer was not sufficient.
  */
 int
-gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic_packed *b)
+gnet_stats_copy_basic(struct gnet_dump *d,
+                     struct gnet_stats_basic_cpu __percpu *cpu,
+                     struct gnet_stats_basic_packed *b)
 {
+       struct gnet_stats_basic_packed bstats = {0};
+
+       __gnet_stats_copy_basic(&bstats, cpu, b);
+
        if (d->compat_tc_stats) {
-               d->tc_stats.bytes = b->bytes;
-               d->tc_stats.packets = b->packets;
+               d->tc_stats.bytes = bstats.bytes;
+               d->tc_stats.packets = bstats.packets;
        }
 
        if (d->tail) {
                struct gnet_stats_basic sb;
 
                memset(&sb, 0, sizeof(sb));
-               sb.bytes = b->bytes;
-               sb.packets = b->packets;
+               sb.bytes = bstats.bytes;
+               sb.packets = bstats.packets;
                return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb));
        }
        return 0;
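
/*
 * Editor's sketch (not part of the patch): the reader above folds the
 * per-CPU counters under u64_stats_fetch_begin_irq(), so the producer
 * side is assumed to bump this CPU's counters inside the same syncp
 * sequence, keeping the 64-bit byte count coherent on 32-bit hosts.
 * The helper name below is illustrative only.
 */
static inline void example_bstats_cpu_update(struct gnet_stats_basic_cpu __percpu *cpu_bstats,
					     const struct sk_buff *skb)
{
	struct gnet_stats_basic_cpu *bcpu = this_cpu_ptr(cpu_bstats);

	u64_stats_update_begin(&bcpu->syncp);
	bcpu->bstats.bytes += skb->len;
	bcpu->bstats.packets++;
	u64_stats_update_end(&bcpu->syncp);
}
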
@@ -172,29 +215,74 @@ gnet_stats_copy_rate_est(struct gnet_dump *d,
 }
 EXPORT_SYMBOL(gnet_stats_copy_rate_est);
 
+static void
+__gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
+                           const struct gnet_stats_queue __percpu *q)
+{
+       int i;
+
+       for_each_possible_cpu(i) {
+               const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
+
+               qstats->qlen = 0;
+               qstats->backlog += qcpu->backlog;
+               qstats->drops += qcpu->drops;
+               qstats->requeues += qcpu->requeues;
+               qstats->overlimits += qcpu->overlimits;
+       }
+}
+
+static void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
+                                   const struct gnet_stats_queue __percpu *cpu,
+                                   const struct gnet_stats_queue *q,
+                                   __u32 qlen)
+{
+       if (cpu) {
+               __gnet_stats_copy_queue_cpu(qstats, cpu);
+       } else {
+               qstats->qlen = q->qlen;
+               qstats->backlog = q->backlog;
+               qstats->drops = q->drops;
+               qstats->requeues = q->requeues;
+               qstats->overlimits = q->overlimits;
+       }
+
+       qstats->qlen = qlen;
+}
+
 /**
  * gnet_stats_copy_queue - copy queue statistics into statistics TLV
  * @d: dumping handle
+ * @cpu_q: per cpu queue statistics
  * @q: queue statistics
+ * @qlen: queue length statistics
  *
  * Appends the queue statistics to the top level TLV created by
- * gnet_stats_start_copy().
+ * gnet_stats_start_copy(). Per-CPU queue statistics are used if
+ * they are available.
  *
  * Returns 0 on success or -1 with the statistic lock released
  * if the room in the socket buffer was not sufficient.
  */
 int
-gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue *q)
+gnet_stats_copy_queue(struct gnet_dump *d,
+                     struct gnet_stats_queue __percpu *cpu_q,
+                     struct gnet_stats_queue *q, __u32 qlen)
 {
+       struct gnet_stats_queue qstats = {0};
+
+       __gnet_stats_copy_queue(&qstats, cpu_q, q, qlen);
+
        if (d->compat_tc_stats) {
-               d->tc_stats.drops = q->drops;
-               d->tc_stats.qlen = q->qlen;
-               d->tc_stats.backlog = q->backlog;
-               d->tc_stats.overlimits = q->overlimits;
+               d->tc_stats.drops = qstats.drops;
+               d->tc_stats.qlen = qstats.qlen;
+               d->tc_stats.backlog = qstats.backlog;
+               d->tc_stats.overlimits = qstats.overlimits;
        }
 
        if (d->tail)
-               return gnet_stats_copy(d, TCA_STATS_QUEUE, q, sizeof(*q));
+               return gnet_stats_copy(d, TCA_STATS_QUEUE,
+                                      &qstats, sizeof(qstats));
 
        return 0;
 }
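
/*
 * Editor's sketch (not part of the patch): with the new signatures a
 * dump path passes either the per-CPU arrays or the plain structs, and
 * hands the queue length in explicitly because the per-CPU copies never
 * carry a single coherent qlen.  The function and parameter names below
 * are illustrative; a NULL per-CPU pointer falls back to the regular
 * struct, as shown in __gnet_stats_copy_basic()/__gnet_stats_copy_queue().
 */
static int example_dump_stats(struct gnet_dump *d,
			      struct gnet_stats_basic_cpu __percpu *cpu_bstats,
			      struct gnet_stats_basic_packed *bstats,
			      struct gnet_stats_queue __percpu *cpu_qstats,
			      struct gnet_stats_queue *qstats, __u32 qlen)
{
	if (gnet_stats_copy_basic(d, cpu_bstats, bstats) < 0 ||
	    gnet_stats_copy_queue(d, cpu_qstats, qstats, qlen) < 0)
		return -1;

	return 0;
}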