Commit 0ee13079 authored by Linus Torvalds

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [PKTGEN]: Remove write-only variable.
  [NETFILTER]: xt_tcpudp: fix wrong struct in udp_checkentry
  [NET_SCHED] sch_prio.c: remove duplicate call of tc_classify()
  [BRIDGE]: Fix OOPS when bridging device without ethtool.
  [BRIDGE]: Packets leaking out of disabled/blocked ports.
  [TCP]: Allow minimum RTO to be configurable via routing metrics.
  SCTP: Fix to handle invalid parameter length correctly
  SCTP: Abort on COOKIE-ECHO if backlog is exceeded.
  SCTP: Correctly disable listening when backlog is 0.
  SCTP: Do not retransmit chunks that are newer than rtt.
  SCTP: Unconfirmed transports can't become Inactive
  SCTP: Pick the correct port when binding to 0.
  SCTP: Use net_ratelimit to suppress error messages print too fast
  SCTP: Fix to encode PROTOCOL VIOLATION error cause correctly
  SCTP: Fix sctp_addto_chunk() to add pad with correct length
  SCTP: Assign stream sequence numbers to the entire message
  SCTP: properly clean up fragment and ordering queues during FWD-TSN.
  [PKTGEN]: Fix multiqueue oops.
  [BNX2]: Add write posting comment.
  [BNX2]: Use msleep().
parents 2d8348b4 88282c6e
@@ -3934,11 +3934,13 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
 	/* Chip reset. */
 	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
 
+	/* Reading back any register after chip reset will hang the
+	 * bus on 5706 A0 and A1. The msleep below provides plenty
+	 * of margin for write posting.
+	 */
 	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
-	    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
-		current->state = TASK_UNINTERRUPTIBLE;
-		schedule_timeout(HZ / 50);
-	}
+	    (CHIP_ID(bp) == CHIP_ID_5706_A1))
+		msleep(20);
 
 	/* Reset takes approximate 30 usec */
 	for (i = 0; i < 10; i++) {
...
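The hunk above swaps an open-coded uninterruptible sleep for msleep(). The two idioms are equivalent here, since HZ/50 jiffies is 20 ms at any clock rate; a minimal sketch (the helper name is made up, not part of the patch):

#include <linux/delay.h>	/* msleep() */

/* Hypothetical helper, only to show the substitution.  The old pattern,
 *
 *	current->state = TASK_UNINTERRUPTIBLE;
 *	schedule_timeout(HZ / 50);
 *
 * sleeps for HZ/50 jiffies, which is 20 ms regardless of HZ; msleep()
 * does the same thing in one call and states the delay in milliseconds.
 */
static void bnx2_reset_settle(void)
{
	msleep(20);
}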
@@ -351,6 +351,8 @@ enum
 #define RTAX_INITCWND RTAX_INITCWND
 	RTAX_FEATURES,
 #define RTAX_FEATURES RTAX_FEATURES
+	RTAX_RTO_MIN,
+#define RTAX_RTO_MIN RTAX_RTO_MIN
 	__RTAX_MAX
 };
 
...
@@ -214,7 +214,7 @@ struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc,
 					  const struct sctp_chunk *);
 struct sctp_chunk *sctp_make_shutdown_complete(const struct sctp_association *,
 					       const struct sctp_chunk *);
-void sctp_init_cause(struct sctp_chunk *, __be16 cause, const void *, size_t);
+void sctp_init_cause(struct sctp_chunk *, __be16 cause, size_t);
 struct sctp_chunk *sctp_make_abort(const struct sctp_association *,
 				   const struct sctp_chunk *,
 				   const size_t hint);
...
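The new prototype drops the payload pointer: sctp_init_cause() now only writes the error-cause header, and callers append the cause-specific bytes themselves. A minimal sketch of the new calling convention, mirroring the sctp_make_abort_no_data() hunk further down (the wrapper function is hypothetical):

#include <net/sctp/sctp.h>

/* Hypothetical wrapper, just to isolate the two-step pattern. */
static void example_no_data_cause(struct sctp_chunk *retval, __u32 tsn)
{
	__be32 payload = htonl(tsn);

	/* Step 1: write the cause header, sized for the payload to come. */
	sctp_init_cause(retval, SCTP_ERROR_NO_DATA, sizeof(payload));
	/* Step 2: append the cause-specific bytes as a separate call. */
	sctp_addto_chunk(retval, sizeof(payload), (const void *)&payload);
}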
@@ -726,6 +726,7 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
 			  struct iovec *data);
 void sctp_chunk_free(struct sctp_chunk *);
 void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
+void *sctp_addto_param(struct sctp_chunk *, int len, const void *data);
 struct sctp_chunk *sctp_chunkify(struct sk_buff *,
 				 const struct sctp_association *,
 				 struct sock *);
...
@@ -83,6 +83,7 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc);
 
 /* Skip over an SSN. */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *, __u32);
 
 #endif /* __sctp_ulpqueue_h__ */
...
@@ -384,6 +384,11 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 	if (hold_time(br) == 0)
 		return;
 
+	/* ignore packets unless we are using this port */
+	if (!(source->state == BR_STATE_LEARNING ||
+	      source->state == BR_STATE_FORWARDING))
+		return;
+
 	fdb = fdb_find(head, addr);
 	if (likely(fdb)) {
 		/* attempt to update an entry for a local interface */
...
@@ -33,17 +33,17 @@
  */
 static int port_cost(struct net_device *dev)
 {
-	if (dev->ethtool_ops->get_settings) {
-		struct ethtool_cmd ecmd = { ETHTOOL_GSET };
-		int err = dev->ethtool_ops->get_settings(dev, &ecmd);
-		if (!err) {
+	if (dev->ethtool_ops && dev->ethtool_ops->get_settings) {
+		struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET, };
+
+		if (!dev->ethtool_ops->get_settings(dev, &ecmd)) {
 			switch(ecmd.speed) {
-			case SPEED_100:
-				return 19;
-			case SPEED_1000:
-				return 4;
 			case SPEED_10000:
 				return 2;
+			case SPEED_1000:
+				return 4;
+			case SPEED_100:
+				return 19;
 			case SPEED_10:
 				return 100;
 			}
...
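Besides the NULL check on ethtool_ops, the cases are reordered from fastest to slowest; the values themselves are the 802.1D recommended path costs (10 Gb/s = 2, 1 Gb/s = 4, 100 Mb/s = 19, 10 Mb/s = 100). An equivalent table-driven form, purely illustrative (names and the fallback are made up, not from the patch):

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/types.h>
#include <linux/ethtool.h>	/* SPEED_* */

static const struct {
	u32 speed;
	int cost;
} br_speed_cost[] = {
	{ SPEED_10000,   2 },
	{ SPEED_1000,    4 },
	{ SPEED_100,    19 },
	{ SPEED_10,    100 },
};

static int br_speed_to_cost(u32 speed)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(br_speed_cost); i++)
		if (br_speed_cost[i].speed == speed)
			return br_speed_cost[i].cost;
	return 100;	/* fall back to the most conservative cost */
}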
@@ -101,9 +101,8 @@ static int br_handle_local_finish(struct sk_buff *skb)
 {
 	struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
 
-	if (p && p->state != BR_STATE_DISABLED)
+	if (p)
 		br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
-
 	return 0;	/* process further */
 }
...
@@ -380,7 +380,6 @@ struct pktgen_thread {
 	/* Field for thread to receive "posted" events terminate, stop ifs etc. */
 
 	u32 control;
-	int pid;
 	int cpu;
 
 	wait_queue_head_t queue;
@@ -3331,8 +3330,9 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	}
 
 	if ((netif_queue_stopped(odev) ||
-	     netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) ||
-	    need_resched()) {
+	     (pkt_dev->skb &&
+	      netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping))) ||
+	    need_resched()) {
 		idle_start = getCurUs();
 
 		if (!netif_running(odev)) {
@@ -3462,8 +3462,6 @@ static int pktgen_thread_worker(void *arg)
 	init_waitqueue_head(&t->queue);
 
-	t->pid = current->pid;
-
 	pr_debug("pktgen: starting pktgen/%d: pid=%d\n", cpu, current->pid);
 
 	max_before_softirq = t->max_before_softirq;
...
@@ -555,6 +555,16 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 		tcp_grow_window(sk, skb);
 }
 
+static u32 tcp_rto_min(struct sock *sk)
+{
+	struct dst_entry *dst = __sk_dst_get(sk);
+	u32 rto_min = TCP_RTO_MIN;
+
+	if (dst_metric_locked(dst, RTAX_RTO_MIN))
+		rto_min = dst->metrics[RTAX_RTO_MIN-1];
+	return rto_min;
+}
+
 /* Called to compute a smoothed rtt estimate. The data fed to this
  * routine either comes from timestamps, or from segments that were
  * known _not_ to have been retransmitted [see Karn/Partridge
@@ -616,13 +626,13 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
 			if (tp->mdev_max < tp->rttvar)
 				tp->rttvar -= (tp->rttvar-tp->mdev_max)>>2;
 			tp->rtt_seq = tp->snd_nxt;
-			tp->mdev_max = TCP_RTO_MIN;
+			tp->mdev_max = tcp_rto_min(sk);
 		}
 	} else {
 		/* no previous measure. */
 		tp->srtt = m<<3;	/* take the measured time to be rtt */
 		tp->mdev = m<<1;	/* make sure rto = 3*rtt */
-		tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
+		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
 		tp->rtt_seq = tp->snd_nxt;
 	}
 }
...
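tcp_rto_min() above honours a per-route value only when that metric is locked on the route: dst_metric_locked() tests the per-metric lock bitmap kept in the RTAX_LOCK slot. For reference, the helper looks roughly like this in include/net/dst.h of this era (shown for context, not part of the patch):

static inline u32 dst_metric(const struct dst_entry *dst, int metric)
{
	return dst->metrics[metric - 1];
}

static inline int dst_metric_locked(struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1 << metric);
}

Userspace support for actually setting the new RTAX_RTO_MIN metric (for example an rto_min keyword in iproute2) is a separate change and is not part of this merge.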
@@ -188,7 +188,7 @@ udp_checkentry(const char *tablename,
 	       void *matchinfo,
 	       unsigned int hook_mask)
 {
-	const struct xt_tcp *udpinfo = matchinfo;
+	const struct xt_udp *udpinfo = matchinfo;
 
 	/* Must specify no unknown invflags */
 	return !(udpinfo->invflags & ~XT_UDP_INV_MASK);
...
@@ -44,7 +44,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	if (TC_H_MAJ(skb->priority) != sch->handle) {
 		err = tc_classify(skb, q->filter_list, &res);
 #ifdef CONFIG_NET_CLS_ACT
-		switch (tc_classify(skb, q->filter_list, &res)) {
+		switch (err) {
 		case TC_ACT_STOLEN:
 		case TC_ACT_QUEUED:
 			*qerr = NET_XMIT_SUCCESS;
...
@@ -727,7 +727,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
 		break;
 
 	case SCTP_TRANSPORT_DOWN:
-		transport->state = SCTP_INACTIVE;
+		/* if the transort was never confirmed, do not transition it
+		 * to inactive state.
+		 */
+		if (transport->state != SCTP_UNCONFIRMED)
+			transport->state = SCTP_INACTIVE;
+
 		spc_state = SCTP_ADDR_UNREACHABLE;
 		break;
...
@@ -421,6 +421,13 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 		 */
 		if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
 		    (!fast_retransmit && !chunk->tsn_gap_acked)) {
+			/* If this chunk was sent less then 1 rto ago, do not
+			 * retransmit this chunk, but give the peer time
+			 * to acknowlege it.
+			 */
+			if ((jiffies - chunk->sent_at) < transport->rto)
+				continue;
+
 			/* RFC 2960 6.2.1 Processing a Received SACK
 			 *
 			 * C) Any time a DATA chunk is marked for
...
@@ -110,7 +110,7 @@ static const struct sctp_paramhdr prsctp_param = {
 *   abort chunk.
 */
 void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
-		     const void *payload, size_t paylen)
+		     size_t paylen)
 {
 	sctp_errhdr_t err;
 	__u16 len;
@@ -120,7 +120,6 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
 	len = sizeof(sctp_errhdr_t) + paylen;
 	err.length = htons(len);
 	chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
-	sctp_addto_chunk(chunk, paylen, payload);
 }
 
 /* 3.3.2 Initiation (INIT) (1)
@@ -780,8 +779,8 @@ struct sctp_chunk *sctp_make_abort_no_data(
 
 	/* Put the tsn back into network byte order.  */
 	payload = htonl(tsn);
-	sctp_init_cause(retval, SCTP_ERROR_NO_DATA, (const void *)&payload,
-			sizeof(payload));
+	sctp_init_cause(retval, SCTP_ERROR_NO_DATA, sizeof(payload));
+	sctp_addto_chunk(retval, sizeof(payload), (const void *)&payload);
 
 	/* RFC 2960 6.4 Multi-homed SCTP Endpoints
 	 *
@@ -823,7 +822,8 @@ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
 		goto err_copy;
 	}
 
-	sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen);
+	sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, paylen);
+	sctp_addto_chunk(retval, paylen, payload);
 
 	if (paylen)
 		kfree(payload);
@@ -850,15 +850,17 @@ struct sctp_chunk *sctp_make_abort_violation(
 	struct sctp_paramhdr phdr;
 
 	retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen
-					+ sizeof(sctp_chunkhdr_t));
+					+ sizeof(sctp_paramhdr_t));
 	if (!retval)
 		goto end;
 
-	sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, payload, paylen);
+	sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, paylen
+					+ sizeof(sctp_paramhdr_t));
 
 	phdr.type = htons(chunk->chunk_hdr->type);
 	phdr.length = chunk->chunk_hdr->length;
-	sctp_addto_chunk(retval, sizeof(sctp_paramhdr_t), &phdr);
+	sctp_addto_chunk(retval, paylen, payload);
+	sctp_addto_param(retval, sizeof(sctp_paramhdr_t), &phdr);
 
 end:
 	return retval;
@@ -955,7 +957,8 @@ struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
 	if (!retval)
 		goto nodata;
 
-	sctp_init_cause(retval, cause_code, payload, paylen);
+	sctp_init_cause(retval, cause_code, paylen);
+	sctp_addto_chunk(retval, paylen, payload);
 
 nodata:
 	return retval;
@@ -1128,7 +1131,7 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
 	void *target;
 	void *padding;
 	int chunklen = ntohs(chunk->chunk_hdr->length);
-	int padlen = chunklen % 4;
+	int padlen = WORD_ROUND(chunklen) - chunklen;
 
 	padding = skb_put(chunk->skb, padlen);
 	target = skb_put(chunk->skb, len);
@@ -1143,6 +1146,25 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
 	return target;
 }
 
+/* Append bytes to the end of a parameter.  Will panic if chunk is not big
+ * enough.
+ */
+void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data)
+{
+	void *target;
+	int chunklen = ntohs(chunk->chunk_hdr->length);
+
+	target = skb_put(chunk->skb, len);
+
+	memcpy(target, data, len);
+
+	/* Adjust the chunk length field.  */
+	chunk->chunk_hdr->length = htons(chunklen + len);
+	chunk->chunk_end = skb_tail_pointer(chunk->skb);
+
+	return target;
+}
+
 /* Append bytes from user space to the end of a chunk.  Will panic if
  * chunk is not big enough.
  * Returns a kernel err value.
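Two related fixes sit next to each other here. sctp_addto_chunk() pads the data already in the chunk out to a 4-byte boundary before appending, and the old pad computation was wrong: chunklen % 4 is the overshoot past the last boundary, not the number of pad bytes still needed. The new sctp_addto_param() exists because the pieces of one error cause must be appended back to back, with no pad bytes inserted in the middle of the cause. A standalone check of the pad arithmetic; WORD_ROUND is copied from include/net/sctp/sctp.h, the demo itself is not from the patch:

#include <stdio.h>

#define WORD_ROUND(s) (((s) + 3) & ~3)	/* as in include/net/sctp/sctp.h */

int main(void)
{
	int chunklen;

	for (chunklen = 16; chunklen <= 19; chunklen++)
		printf("chunklen %2d: old pad (len %% 4) = %d, new pad = %d\n",
		       chunklen, chunklen % 4,
		       WORD_ROUND(chunklen) - chunklen);
	return 0;	/* e.g. chunklen 17: old pad 1, correct pad 3 */
}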
@@ -1174,25 +1196,36 @@ out:
 */
 void sctp_chunk_assign_ssn(struct sctp_chunk *chunk)
 {
+	struct sctp_datamsg *msg;
+	struct sctp_chunk *lchunk;
+	struct sctp_stream *stream;
 	__u16 ssn;
 	__u16 sid;
 
 	if (chunk->has_ssn)
 		return;
 
-	/* This is the last possible instant to assign a SSN. */
-	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
-		ssn = 0;
-	} else {
-		sid = ntohs(chunk->subh.data_hdr->stream);
-		if (chunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
-			ssn = sctp_ssn_next(&chunk->asoc->ssnmap->out, sid);
-		else
-			ssn = sctp_ssn_peek(&chunk->asoc->ssnmap->out, sid);
-	}
+	/* All fragments will be on the same stream */
+	sid = ntohs(chunk->subh.data_hdr->stream);
+	stream = &chunk->asoc->ssnmap->out;
 
-	chunk->subh.data_hdr->ssn = htons(ssn);
-	chunk->has_ssn = 1;
+	/* Now assign the sequence number to the entire message.
+	 * All fragments must have the same stream sequence number.
+	 */
+	msg = chunk->msg;
+	list_for_each_entry(lchunk, &msg->chunks, frag_list) {
+		if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
+			ssn = 0;
+		} else {
+			if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
+				ssn = sctp_ssn_next(stream, sid);
+			else
+				ssn = sctp_ssn_peek(stream, sid);
+		}
+
+		lchunk->subh.data_hdr->ssn = htons(ssn);
+		lchunk->has_ssn = 1;
+	}
 }
 
 /* Helper function to assign a TSN if needed.  This assumes that both
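The rewritten sctp_chunk_assign_ssn() walks every fragment of the message and gives them all the same stream sequence number, advancing the stream counter only on the fragment flagged SCTP_DATA_LAST_FRAG. That works because the two helpers are just peek and post-increment macros, roughly as defined in include/net/sctp/structs.h (shown for context, not part of the patch):

/* What is the current SSN number for this stream? */
#define sctp_ssn_peek(stream, id)	((stream)->ssn[id])

/* Return the next SSN number for this stream and advance the counter. */
#define sctp_ssn_next(stream, id)	((stream)->ssn[id]++)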
@@ -1466,7 +1499,8 @@ no_hmac:
 		__be32 n = htonl(usecs);
 
 		sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE,
-				&n, sizeof(n));
+				sizeof(n));
+		sctp_addto_chunk(*errp, sizeof(n), &n);
 		*error = -SCTP_IERROR_STALE_COOKIE;
 	} else
 		*error = -SCTP_IERROR_NOMEM;
@@ -1556,7 +1590,8 @@ static int sctp_process_missing_param(const struct sctp_association *asoc,
 		report.num_missing = htonl(1);
 		report.type = paramtype;
 		sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM,
-				&report, sizeof(report));
+				sizeof(report));
+		sctp_addto_chunk(*errp, sizeof(report), &report);
 	}
 
 	/* Stop processing this chunk. */
@@ -1574,7 +1609,7 @@ static int sctp_process_inv_mandatory(const struct sctp_association *asoc,
 	*errp = sctp_make_op_error_space(asoc, chunk, 0);
 
 	if (*errp)
-		sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, NULL, 0);
+		sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, 0);
 
 	/* Stop processing this chunk. */
 	return 0;
@@ -1595,9 +1630,10 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
 	*errp = sctp_make_op_error_space(asoc, chunk, payload_len);
 
 	if (*errp) {
-		sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION, error,
-				sizeof(error));
-		sctp_addto_chunk(*errp, sizeof(sctp_paramhdr_t), param);
+		sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION,
+				sizeof(error) + sizeof(sctp_paramhdr_t));
+		sctp_addto_chunk(*errp, sizeof(error), error);
+		sctp_addto_param(*errp, sizeof(sctp_paramhdr_t), param);
 	}
 
 	return 0;
@@ -1618,9 +1654,10 @@ static int sctp_process_hn_param(const struct sctp_association *asoc,
 	if (!*errp)
 		*errp = sctp_make_op_error_space(asoc, chunk, len);
 
-	if (*errp)
-		sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED,
-				param.v, len);
+	if (*errp) {
+		sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
+		sctp_addto_chunk(*errp, len, param.v);
+	}
 
 	/* Stop processing this chunk. */
 	return 0;
@@ -1672,10 +1709,13 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
 		*errp = sctp_make_op_error_space(asoc, chunk,
 						 ntohs(chunk->chunk_hdr->length));
 
-		if (*errp)
+		if (*errp) {
 			sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-					param.v,
 					WORD_ROUND(ntohs(param.p->length)));
+			sctp_addto_chunk(*errp,
+					 WORD_ROUND(ntohs(param.p->length)),
+					 param.v);
+		}