Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c  45
1 file changed, 14 insertions, 31 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f4de111fa18f..b815a914b50a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1670,15 +1670,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
int len;
- if (trace_seq_used(s) <= s->seq.readpos)
+ if (trace_seq_used(s) <= s->readpos)
return -EBUSY;
- len = trace_seq_used(s) - s->seq.readpos;
+ len = trace_seq_used(s) - s->readpos;
if (cnt > len)
cnt = len;
- memcpy(buf, s->buffer + s->seq.readpos, cnt);
+ memcpy(buf, s->buffer + s->readpos, cnt);
- s->seq.readpos += cnt;
+ s->readpos += cnt;
return cnt;
}
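
The readpos change above is the structural part of this conversion: seq_buf carried its own read offset, but the printbuf that replaces it only tracks a write position, so the read cursor has to move up into struct trace_seq itself. A minimal sketch of the assumed before/after layouts follows; the _old/_new names and exact fields are illustrative, not copied from the real headers (the actual struct is trace_seq in include/linux/trace_seq.h).

    /* Before: the read offset lives inside the wrapped seq_buf. */
    struct trace_seq_old {
            char            buffer[PAGE_SIZE];
            struct seq_buf  seq;    /* .buffer, .size, .len, .readpos */
            int             full;
    };

    /* After (assumed layout): printbuf only tracks a write position
     * (.pos), so the read cursor moves into trace_seq itself. */
    struct trace_seq_new {
            char            buffer[PAGE_SIZE];
            struct printbuf seq;    /* .buf, .size, .pos */
            unsigned int    readpos;
            int             full;
    };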
@@ -3725,11 +3725,7 @@ static bool trace_safe_str(struct trace_iterator *iter, const char *str,
static const char *show_buffer(struct trace_seq *s)
{
- struct seq_buf *seq = &s->seq;
-
- seq_buf_terminate(seq);
-
- return seq->buffer;
+ return printbuf_str(&s->seq);
}
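
show_buffer() previously had to terminate the seq_buf by hand before handing out its buffer; with printbuf the terminate-and-return step is a single helper. Below is a sketch of what printbuf_str() is assumed to do here, based on how this series uses it; the real helpers live in the printbuf headers and may differ in detail.

    static inline void printbuf_nul_terminate(struct printbuf *out)
    {
            /* Write the terminating NUL at the current write position,
             * or at the last byte if the buffer is already full. */
            if (out->pos < out->size)
                    out->buf[out->pos] = '\0';
            else if (out->size)
                    out->buf[out->size - 1] = '\0';
    }

    static inline const char *printbuf_str(const struct printbuf *buf)
    {
            /* Terminate, then hand back the string. */
            printbuf_nul_terminate((struct printbuf *) buf);
            return buf->buf;
    }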
static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
@@ -6762,12 +6758,12 @@ waitagain:
trace_access_lock(iter->cpu_file);
while (trace_find_next_entry_inc(iter) != NULL) {
enum print_line_t ret;
- int save_len = iter->seq.seq.len;
+ int save_pos = iter->seq.seq.pos;
ret = print_trace_line(iter);
if (ret == TRACE_TYPE_PARTIAL_LINE) {
/* don't print partial lines */
- iter->seq.seq.len = save_len;
+ iter->seq.seq.pos = save_pos;
break;
}
if (ret != TRACE_TYPE_NO_CONSUME)
@@ -6789,7 +6785,7 @@ waitagain:
/* Now copy what we have to the user */
sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
- if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
+ if (iter->seq.readpos >= trace_seq_used(&iter->seq))
trace_seq_init(&iter->seq);
/*
@@ -6815,16 +6811,15 @@ static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
size_t count;
- int save_len;
int ret;
/* Seq buffer is page-sized, exactly what we need. */
for (;;) {
- save_len = iter->seq.seq.len;
+ unsigned save_pos = iter->seq.seq.pos;
ret = print_trace_line(iter);
if (trace_seq_has_overflowed(&iter->seq)) {
- iter->seq.seq.len = save_len;
+ iter->seq.seq.pos = save_pos;
break;
}
@@ -6834,14 +6829,14 @@ tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
* anyway to be safe.
*/
if (ret == TRACE_TYPE_PARTIAL_LINE) {
- iter->seq.seq.len = save_len;
+ iter->seq.seq.pos = save_pos;
break;
}
- count = trace_seq_used(&iter->seq) - save_len;
+ count = trace_seq_used(&iter->seq) - save_pos;
if (rem < count) {
rem = 0;
- iter->seq.seq.len = save_len;
+ iter->seq.seq.pos = save_pos;
break;
}
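
The seq.len to seq.pos edits in tracing_read_pipe() and tracing_fill_pipe_page() are a straight rename of the write cursor: seq_buf called it len, printbuf calls it pos, and the snapshot-then-roll-back-on-partial-line pattern carries over unchanged, so count = trace_seq_used(&iter->seq) - save_pos is still the number of bytes the just-printed line added. Under that assumption, trace_seq_used() would reduce to something like the sketch below (not the actual helper from trace_seq.h).

    static inline int trace_seq_used(struct trace_seq *s)
    {
            /* Bytes written so far, clamped to the buffer size so an
             * overflowed sequence never reports more than fits. */
            return min(s->seq.pos, s->seq.size);
    }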
@@ -9817,20 +9812,8 @@ static struct notifier_block trace_die_notifier = {
void
trace_printk_seq(struct trace_seq *s)
{
- /* Probably should print a warning here. */
- if (s->seq.len >= TRACE_MAX_PRINT)
- s->seq.len = TRACE_MAX_PRINT;
-
- /*
- * More paranoid code. Although the buffer size is set to
- * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
- * an extra layer of protection.
- */
- if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
- s->seq.len = s->seq.size - 1;
-
/* should be zero ended, but we are paranoid. */
- s->buffer[s->seq.len] = 0;
+ printbuf_nul_terminate(&s->seq);
printk(KERN_TRACE "%s", s->buffer);
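
With the manual clamping gone, the body reduces to terminate, print, reset. Reconstructed below is how trace_printk_seq() would read with this hunk applied; the trailing lines come from context the hunk does not show and are assumed unchanged. Note that the old TRACE_MAX_PRINT truncation is dropped along with the bounds check rather than reimplemented on top of printbuf.

    void trace_printk_seq(struct trace_seq *s)
    {
            /* should be zero ended, but we are paranoid. */
            printbuf_nul_terminate(&s->seq);

            printk(KERN_TRACE "%s", s->buffer);

            trace_seq_init(s);
    }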