Merge branch 'sctp-stream-schedulers'

Marcelo Ricardo Leitner says:

====================
Introduce SCTP Stream Schedulers

This patchset introduces the SCTP Stream Schedulers as defined by
https://tools.ietf.org/html/draft-ietf-tsvwg-sctp-ndata-13

It provides 3 schedulers at the moment: FCFS, Priority and Round Robin.
The other 3 (Round Robin per packet, Fair Capacity and Weighted Fair
Capacity) will be added later. In particular, WFQ is required by
WebRTC Datachannels.

The draft also defines the idata chunk, which allows a user message to be
interleaved with idata from another stream. This patchset *doesn't*
include it; it will be posted later by Xin Long. Its integration with
this patchset is very simple: it basically only requires a tweak in
sctp_sched_dequeue_done() to ignore datamsg boundaries.

The first 5 patches are preparation for the next ones. The most
relevant are the 4th and the 6th. More details are available in
each patch.

v2: changelog update on patch 3
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2017-10-03 16:27:29 -07:00
Parents: af14827fa3 ac1ed8b82c
Commit: 26873308b2
12 changed files with 1348 additions and 78 deletions


@ -0,0 +1,72 @@
/* SCTP kernel implementation
* (C) Copyright Red Hat Inc. 2017
*
* These are definitions used by the stream schedulers, defined in RFC
* draft ndata (https://tools.ietf.org/html/draft-ietf-tsvwg-sctp-ndata-11)
*
* This SCTP implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This SCTP implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email addresses:
* lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
*/
#ifndef __sctp_stream_sched_h__
#define __sctp_stream_sched_h__
struct sctp_sched_ops {
/* Property handling for a given stream */
int (*set)(struct sctp_stream *stream, __u16 sid, __u16 value,
gfp_t gfp);
int (*get)(struct sctp_stream *stream, __u16 sid, __u16 *value);
/* Init the specific scheduler */
int (*init)(struct sctp_stream *stream);
/* Init a stream */
int (*init_sid)(struct sctp_stream *stream, __u16 sid, gfp_t gfp);
/* Frees the entire thing */
void (*free)(struct sctp_stream *stream);
/* Enqueue a chunk */
void (*enqueue)(struct sctp_outq *q, struct sctp_datamsg *msg);
/* Dequeue a chunk */
struct sctp_chunk *(*dequeue)(struct sctp_outq *q);
/* Called only if the chunk fit the packet */
void (*dequeue_done)(struct sctp_outq *q, struct sctp_chunk *chunk);
/* Sched all chunks already enqueued */
void (*sched_all)(struct sctp_stream *stream);
/* Unsched all chunks already enqueued */
void (*unsched_all)(struct sctp_stream *stream);
};
int sctp_sched_set_sched(struct sctp_association *asoc,
enum sctp_sched_type sched);
int sctp_sched_get_sched(struct sctp_association *asoc);
int sctp_sched_set_value(struct sctp_association *asoc, __u16 sid,
__u16 value, gfp_t gfp);
int sctp_sched_get_value(struct sctp_association *asoc, __u16 sid,
__u16 *value);
void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch);
void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch);
int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp);
struct sctp_sched_ops *sctp_sched_ops_from_stream(struct sctp_stream *stream);
#endif /* __sctp_stream_sched_h__ */


@ -380,6 +380,7 @@ struct sctp_sender_hb_info {
int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
gfp_t gfp);
int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid);
void sctp_stream_free(struct sctp_stream *stream);
void sctp_stream_clear(struct sctp_stream *stream);
void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new);
@ -529,8 +530,12 @@ struct sctp_chunk {
/* How many times this chunk has been sent, for prsctp RTX policy */
int sent_count;
/* This is our link to the per-transport transmitted list. */
struct list_head transmitted_list;
union {
/* This is our link to the per-transport transmitted list. */
struct list_head transmitted_list;
/* List in specific stream outq */
struct list_head stream_list;
};
/* This field is used by chunks that hold fragmented data.
* For the first fragment this is the list that holds the rest of
@ -640,6 +645,11 @@ void sctp_init_addrs(struct sctp_chunk *, union sctp_addr *,
union sctp_addr *);
const union sctp_addr *sctp_source(const struct sctp_chunk *chunk);
static inline __u16 sctp_chunk_stream_no(struct sctp_chunk *ch)
{
return ntohs(ch->subh.data_hdr->stream);
}
enum {
SCTP_ADDR_NEW, /* new address added to assoc/ep */
SCTP_ADDR_SRC, /* address can be used as source */
@ -1012,6 +1022,9 @@ struct sctp_outq {
/* Data pending that has never been transmitted. */
struct list_head out_chunk_list;
/* Stream scheduler being used */
struct sctp_sched_ops *sched;
unsigned int out_qlen; /* Total length of queued data chunks. */
/* Error of send failed, may used in SCTP_SEND_FAILED event. */
@ -1315,11 +1328,37 @@ struct sctp_inithdr_host {
__u32 initial_tsn;
};
struct sctp_stream_priorities {
/* List of priorities scheduled */
struct list_head prio_sched;
/* List of streams scheduled */
struct list_head active;
/* The next stream in line */
struct sctp_stream_out_ext *next;
__u16 prio;
};
struct sctp_stream_out_ext {
__u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
__u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
struct list_head outq; /* chunks enqueued by this stream */
union {
struct {
/* Scheduled streams list */
struct list_head prio_list;
struct sctp_stream_priorities *prio_head;
};
/* Fields used by RR scheduler */
struct {
struct list_head rr_list;
};
};
};
struct sctp_stream_out {
__u16 ssn;
__u8 state;
__u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
__u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
struct sctp_stream_out_ext *ext;
};
struct sctp_stream_in {
@ -1331,6 +1370,22 @@ struct sctp_stream {
struct sctp_stream_in *in;
__u16 outcnt;
__u16 incnt;
/* Current stream being sent, if any */
struct sctp_stream_out *out_curr;
union {
/* Fields used by priority scheduler */
struct {
/* List of priorities scheduled */
struct list_head prio_list;
};
/* Fields used by RR scheduler */
struct {
/* List of streams scheduled */
struct list_head rr_list;
/* The next stream in line */
struct sctp_stream_out_ext *rr_next;
};
};
};
#define SCTP_STREAM_CLOSED 0x00


@ -122,6 +122,8 @@ typedef __s32 sctp_assoc_t;
#define SCTP_RESET_ASSOC 120
#define SCTP_ADD_STREAMS 121
#define SCTP_SOCKOPT_PEELOFF_FLAGS 122
#define SCTP_STREAM_SCHEDULER 123
#define SCTP_STREAM_SCHEDULER_VALUE 124
/* PR-SCTP policies */
#define SCTP_PR_SCTP_NONE 0x0000
@ -814,6 +816,12 @@ struct sctp_assoc_value {
uint32_t assoc_value;
};
struct sctp_stream_value {
sctp_assoc_t assoc_id;
uint16_t stream_id;
uint16_t stream_value;
};
/*
* 7.2.2 Peer Address Information
*
@ -1088,4 +1096,12 @@ struct sctp_add_streams {
uint16_t sas_outstrms;
};
/* SCTP Stream schedulers */
enum sctp_sched_type {
SCTP_SS_FCFS,
SCTP_SS_PRIO,
SCTP_SS_RR,
SCTP_SS_MAX = SCTP_SS_RR
};
#endif /* _UAPI_SCTP_H */
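Both new socket options are per-association: the setsockopt handlers added further down in this diff look the association up by id and return -EINVAL if none exists yet. As a rough usage sketch (not part of this patchset), an application on a connected one-to-one style socket could select the priority scheduler and read it back as below, assuming a <linux/sctp.h> that already carries the definitions from this hunk (otherwise the constants and structs shown above can be copied locally).

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/sctp.h>

/* Illustrative only: switch a connected one-to-one SCTP socket to the
 * priority scheduler and read the setting back. assoc_id 0 refers to
 * the socket's single association on TCP-style sockets.
 */
static int use_prio_scheduler(int sd)
{
	struct sctp_assoc_value av;
	socklen_t len = sizeof(av);

	memset(&av, 0, sizeof(av));
	av.assoc_id = 0;
	av.assoc_value = SCTP_SS_PRIO;

	if (setsockopt(sd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER,
		       &av, sizeof(av)) < 0) {
		perror("setsockopt SCTP_STREAM_SCHEDULER");
		return -1;
	}

	if (getsockopt(sd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER,
		       &av, &len) < 0) {
		perror("getsockopt SCTP_STREAM_SCHEDULER");
		return -1;
	}

	printf("scheduler now: %u\n", av.assoc_value); /* 1 == SCTP_SS_PRIO */
	return 0;
}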


@ -12,7 +12,8 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
inqueue.o outqueue.o ulpqueue.o \
tsnmap.o bind_addr.o socket.o primitive.o \
output.o input.o debug.o stream.o auth.o \
offload.o
offload.o stream_sched.o stream_sched_prio.o \
stream_sched_rr.o
sctp_probe-y := probe.o


@ -311,10 +311,10 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
if (chunk->sent_count) {
chunk->asoc->abandoned_sent[SCTP_PR_INDEX(TTL)]++;
streamout->abandoned_sent[SCTP_PR_INDEX(TTL)]++;
streamout->ext->abandoned_sent[SCTP_PR_INDEX(TTL)]++;
} else {
chunk->asoc->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
streamout->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
streamout->ext->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
}
return 1;
} else if (SCTP_PR_RTX_ENABLED(chunk->sinfo.sinfo_flags) &&
@ -323,7 +323,7 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
&chunk->asoc->stream.out[chunk->sinfo.sinfo_stream];
chunk->asoc->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
streamout->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
streamout->ext->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
return 1;
} else if (!SCTP_PR_POLICY(chunk->sinfo.sinfo_flags) &&
chunk->msg->expires_at &&


@ -50,6 +50,7 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
/* Declare internal functions here. */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
@ -72,32 +73,38 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
/* Add data to the front of the queue. */
static inline void sctp_outq_head_data(struct sctp_outq *q,
struct sctp_chunk *ch)
struct sctp_chunk *ch)
{
struct sctp_stream_out_ext *oute;
__u16 stream;
list_add(&ch->list, &q->out_chunk_list);
q->out_qlen += ch->skb->len;
stream = sctp_chunk_stream_no(ch);
oute = q->asoc->stream.out[stream].ext;
list_add(&ch->stream_list, &oute->outq);
}
/* Take data from the front of the queue. */
static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
{
struct sctp_chunk *ch = NULL;
if (!list_empty(&q->out_chunk_list)) {
struct list_head *entry = q->out_chunk_list.next;
ch = list_entry(entry, struct sctp_chunk, list);
list_del_init(entry);
q->out_qlen -= ch->skb->len;
}
return ch;
return q->sched->dequeue(q);
}
/* Add data chunk to the end of the queue. */
static inline void sctp_outq_tail_data(struct sctp_outq *q,
struct sctp_chunk *ch)
{
struct sctp_stream_out_ext *oute;
__u16 stream;
list_add_tail(&ch->list, &q->out_chunk_list);
q->out_qlen += ch->skb->len;
stream = sctp_chunk_stream_no(ch);
oute = q->asoc->stream.out[stream].ext;
list_add_tail(&ch->stream_list, &oute->outq);
}
/*
@ -207,6 +214,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
INIT_LIST_HEAD(&q->retransmit);
INIT_LIST_HEAD(&q->sacked);
INIT_LIST_HEAD(&q->abandoned);
sctp_sched_set_sched(asoc, SCTP_SS_FCFS);
}
/* Free the outqueue structure and any related pending chunks.
@ -258,6 +266,7 @@ static void __sctp_outq_teardown(struct sctp_outq *q)
/* Throw away any leftover data chunks. */
while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
sctp_sched_dequeue_done(q, chunk);
/* Mark as send failure. */
sctp_chunk_fail(chunk, q->error);
@ -366,7 +375,7 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
streamout = &asoc->stream.out[chk->sinfo.sinfo_stream];
asoc->sent_cnt_removable--;
asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
streamout->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
streamout->ext->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
if (!chk->tsn_gap_acked) {
if (chk->transport)
@ -391,20 +400,21 @@ static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
struct sctp_outq *q = &asoc->outqueue;
struct sctp_chunk *chk, *temp;
q->sched->unsched_all(&asoc->stream);
list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
continue;
list_del_init(&chk->list);
q->out_qlen -= chk->skb->len;
sctp_sched_dequeue_common(q, chk);
asoc->sent_cnt_removable--;
asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
if (chk->sinfo.sinfo_stream < asoc->stream.outcnt) {
struct sctp_stream_out *streamout =
&asoc->stream.out[chk->sinfo.sinfo_stream];
streamout->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
streamout->ext->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
}
msg_len -= SCTP_DATA_SNDSIZE(chk) +
@ -415,6 +425,8 @@ static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
break;
}
q->sched->sched_all(&asoc->stream);
return msg_len;
}
@ -1033,22 +1045,9 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
__u32 sid = ntohs(chunk->subh.data_hdr->stream);
/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
* stream identifier.
*/
if (chunk->sinfo.sinfo_stream >= asoc->stream.outcnt) {
/* Mark as failed send. */
sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
if (asoc->peer.prsctp_capable &&
SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
asoc->sent_cnt_removable--;
sctp_chunk_free(chunk);
continue;
}
/* Has this chunk expired? */
if (sctp_chunk_abandoned(chunk)) {
sctp_sched_dequeue_done(q, chunk);
sctp_chunk_fail(chunk, 0);
sctp_chunk_free(chunk);
continue;
@ -1070,6 +1069,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
new_transport = asoc->peer.active_path;
if (new_transport->state == SCTP_UNCONFIRMED) {
WARN_ONCE(1, "Attempt to send packet on unconfirmed path.");
sctp_sched_dequeue_done(q, chunk);
sctp_chunk_fail(chunk, 0);
sctp_chunk_free(chunk);
continue;
@ -1133,6 +1133,11 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
else
asoc->stats.oodchunks++;
/* Only now it's safe to consider this
* chunk as sent, sched-wise.
*/
sctp_sched_dequeue_done(q, chunk);
break;
default:


@ -50,6 +50,7 @@
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
static int sctp_cmd_interpreter(enum sctp_event event_type,
union sctp_subtype subtype,
@ -1089,6 +1090,8 @@ static void sctp_cmd_send_msg(struct sctp_association *asoc,
list_for_each_entry(chunk, &msg->chunks, frag_list)
sctp_outq_tail(&asoc->outqueue, chunk, gfp);
asoc->outqueue.sched->enqueue(&asoc->outqueue, msg);
}


@ -79,6 +79,7 @@
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
/* Forward declarations for internal helper functions. */
static int sctp_writeable(struct sock *sk);
@ -1927,6 +1928,13 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
goto out_free;
}
/* Allocate sctp_stream_out_ext if not already done */
if (unlikely(!asoc->stream.out[sinfo->sinfo_stream].ext)) {
err = sctp_stream_init_ext(&asoc->stream, sinfo->sinfo_stream);
if (err)
goto out_free;
}
if (sctp_wspace(asoc) < msg_len)
sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc));
@ -3907,6 +3915,64 @@ out:
return retval;
}
static int sctp_setsockopt_scheduler(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_association *asoc;
struct sctp_assoc_value params;
int retval = -EINVAL;
if (optlen < sizeof(params))
goto out;
optlen = sizeof(params);
if (copy_from_user(&params, optval, optlen)) {
retval = -EFAULT;
goto out;
}
if (params.assoc_value > SCTP_SS_MAX)
goto out;
asoc = sctp_id2assoc(sk, params.assoc_id);
if (!asoc)
goto out;
retval = sctp_sched_set_sched(asoc, params.assoc_value);
out:
return retval;
}
static int sctp_setsockopt_scheduler_value(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_association *asoc;
struct sctp_stream_value params;
int retval = -EINVAL;
if (optlen < sizeof(params))
goto out;
optlen = sizeof(params);
if (copy_from_user(&params, optval, optlen)) {
retval = -EFAULT;
goto out;
}
asoc = sctp_id2assoc(sk, params.assoc_id);
if (!asoc)
goto out;
retval = sctp_sched_set_value(asoc, params.stream_id,
params.stream_value, GFP_KERNEL);
out:
return retval;
}
/* API 6.2 setsockopt(), getsockopt()
*
* Applications use setsockopt() and getsockopt() to set or retrieve
@ -4088,6 +4154,12 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
case SCTP_ADD_STREAMS:
retval = sctp_setsockopt_add_streams(sk, optval, optlen);
break;
case SCTP_STREAM_SCHEDULER:
retval = sctp_setsockopt_scheduler(sk, optval, optlen);
break;
case SCTP_STREAM_SCHEDULER_VALUE:
retval = sctp_setsockopt_scheduler_value(sk, optval, optlen);
break;
default:
retval = -ENOPROTOOPT;
break;
@ -6645,7 +6717,7 @@ static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_stream_out *streamout;
struct sctp_stream_out_ext *streamoute;
struct sctp_association *asoc;
struct sctp_prstatus params;
int retval = -EINVAL;
@ -6668,21 +6740,29 @@ static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
if (!asoc || params.sprstat_sid >= asoc->stream.outcnt)
goto out;
streamout = &asoc->stream.out[params.sprstat_sid];
streamoute = asoc->stream.out[params.sprstat_sid].ext;
if (!streamoute) {
/* Not allocated yet, means all stats are 0 */
params.sprstat_abandoned_unsent = 0;
params.sprstat_abandoned_sent = 0;
retval = 0;
goto out;
}
if (policy == SCTP_PR_SCTP_NONE) {
params.sprstat_abandoned_unsent = 0;
params.sprstat_abandoned_sent = 0;
for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
params.sprstat_abandoned_unsent +=
streamout->abandoned_unsent[policy];
streamoute->abandoned_unsent[policy];
params.sprstat_abandoned_sent +=
streamout->abandoned_sent[policy];
streamoute->abandoned_sent[policy];
}
} else {
params.sprstat_abandoned_unsent =
streamout->abandoned_unsent[__SCTP_PR_INDEX(policy)];
streamoute->abandoned_unsent[__SCTP_PR_INDEX(policy)];
params.sprstat_abandoned_sent =
streamout->abandoned_sent[__SCTP_PR_INDEX(policy)];
streamoute->abandoned_sent[__SCTP_PR_INDEX(policy)];
}
if (put_user(len, optlen) || copy_to_user(optval, &params, len)) {
@ -6778,6 +6858,85 @@ out:
return retval;
}
static int sctp_getsockopt_scheduler(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_assoc_value params;
struct sctp_association *asoc;
int retval = -EFAULT;
if (len < sizeof(params)) {
retval = -EINVAL;
goto out;
}
len = sizeof(params);
if (copy_from_user(&params, optval, len))
goto out;
asoc = sctp_id2assoc(sk, params.assoc_id);
if (!asoc) {
retval = -EINVAL;
goto out;
}
params.assoc_value = sctp_sched_get_sched(asoc);
if (put_user(len, optlen))
goto out;
if (copy_to_user(optval, &params, len))
goto out;
retval = 0;
out:
return retval;
}
static int sctp_getsockopt_scheduler_value(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_stream_value params;
struct sctp_association *asoc;
int retval = -EFAULT;
if (len < sizeof(params)) {
retval = -EINVAL;
goto out;
}
len = sizeof(params);
if (copy_from_user(&params, optval, len))
goto out;
asoc = sctp_id2assoc(sk, params.assoc_id);
if (!asoc) {
retval = -EINVAL;
goto out;
}
retval = sctp_sched_get_value(asoc, params.stream_id,
&params.stream_value);
if (retval)
goto out;
if (put_user(len, optlen)) {
retval = -EFAULT;
goto out;
}
if (copy_to_user(optval, &params, len)) {
retval = -EFAULT;
goto out;
}
out:
return retval;
}
static int sctp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
@ -6960,6 +7119,14 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
retval = sctp_getsockopt_enable_strreset(sk, len, optval,
optlen);
break;
case SCTP_STREAM_SCHEDULER:
retval = sctp_getsockopt_scheduler(sk, len, optval,
optlen);
break;
case SCTP_STREAM_SCHEDULER_VALUE:
retval = sctp_getsockopt_scheduler_value(sk, len, optval,
optlen);
break;
default:
retval = -ENOPROTOOPT;
break;


@ -32,44 +32,181 @@
* Xin Long <lucien.xin@gmail.com>
*/
#include <linux/list.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
/* Migrates chunks from stream queues to new stream queues if needed,
* but not across associations. Also, removes those chunks to streams
* higher than the new max.
*/
static void sctp_stream_outq_migrate(struct sctp_stream *stream,
struct sctp_stream *new, __u16 outcnt)
{
struct sctp_association *asoc;
struct sctp_chunk *ch, *temp;
struct sctp_outq *outq;
int i;
asoc = container_of(stream, struct sctp_association, stream);
outq = &asoc->outqueue;
list_for_each_entry_safe(ch, temp, &outq->out_chunk_list, list) {
__u16 sid = sctp_chunk_stream_no(ch);
if (sid < outcnt)
continue;
sctp_sched_dequeue_common(outq, ch);
/* No need to call dequeue_done here because
* the chunks are not scheduled at this point.
*/
/* Mark as failed send. */
sctp_chunk_fail(ch, SCTP_ERROR_INV_STRM);
if (asoc->peer.prsctp_capable &&
SCTP_PR_PRIO_ENABLED(ch->sinfo.sinfo_flags))
asoc->sent_cnt_removable--;
sctp_chunk_free(ch);
}
if (new) {
/* Here we actually move the old ext stuff into the new
* buffer, because we want to keep it. Then
* sctp_stream_update will swap ->out pointers.
*/
for (i = 0; i < outcnt; i++) {
kfree(new->out[i].ext);
new->out[i].ext = stream->out[i].ext;
stream->out[i].ext = NULL;
}
}
for (i = outcnt; i < stream->outcnt; i++)
kfree(stream->out[i].ext);
}
static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
gfp_t gfp)
{
struct sctp_stream_out *out;
out = kmalloc_array(outcnt, sizeof(*out), gfp);
if (!out)
return -ENOMEM;
if (stream->out) {
memcpy(out, stream->out, min(outcnt, stream->outcnt) *
sizeof(*out));
kfree(stream->out);
}
if (outcnt > stream->outcnt)
memset(out + stream->outcnt, 0,
(outcnt - stream->outcnt) * sizeof(*out));
stream->out = out;
return 0;
}
static int sctp_stream_alloc_in(struct sctp_stream *stream, __u16 incnt,
gfp_t gfp)
{
struct sctp_stream_in *in;
in = kmalloc_array(incnt, sizeof(*stream->in), gfp);
if (!in)
return -ENOMEM;
if (stream->in) {
memcpy(in, stream->in, min(incnt, stream->incnt) *
sizeof(*in));
kfree(stream->in);
}
if (incnt > stream->incnt)
memset(in + stream->incnt, 0,
(incnt - stream->incnt) * sizeof(*in));
stream->in = in;
return 0;
}
int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
gfp_t gfp)
{
int i;
struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
int i, ret = 0;
gfp |= __GFP_NOWARN;
/* Initial stream->out size may be very big, so free it and alloc
* a new one with new outcnt to save memory.
* a new one with new outcnt to save memory if needed.
*/
kfree(stream->out);
if (outcnt == stream->outcnt)
goto in;
stream->out = kcalloc(outcnt, sizeof(*stream->out), gfp);
if (!stream->out)
return -ENOMEM;
/* Filter out chunks queued on streams that won't exist anymore */
sched->unsched_all(stream);
sctp_stream_outq_migrate(stream, NULL, outcnt);
sched->sched_all(stream);
i = sctp_stream_alloc_out(stream, outcnt, gfp);
if (i)
return i;
stream->outcnt = outcnt;
for (i = 0; i < stream->outcnt; i++)
stream->out[i].state = SCTP_STREAM_OPEN;
if (!incnt)
return 0;
sched->init(stream);
stream->in = kcalloc(incnt, sizeof(*stream->in), gfp);
if (!stream->in) {
kfree(stream->out);
stream->out = NULL;
return -ENOMEM;
in:
if (!incnt)
goto out;
i = sctp_stream_alloc_in(stream, incnt, gfp);
if (i) {
ret = -ENOMEM;
goto free;
}
stream->incnt = incnt;
goto out;
return 0;
free:
sched->free(stream);
kfree(stream->out);
stream->out = NULL;
out:
return ret;
}
int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid)
{
struct sctp_stream_out_ext *soute;
soute = kzalloc(sizeof(*soute), GFP_KERNEL);
if (!soute)
return -ENOMEM;
stream->out[sid].ext = soute;
return sctp_sched_init_sid(stream, sid, GFP_KERNEL);
}
void sctp_stream_free(struct sctp_stream *stream)
{
struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
int i;
sched->free(stream);
for (i = 0; i < stream->outcnt; i++)
kfree(stream->out[i].ext);
kfree(stream->out);
kfree(stream->in);
}
@ -87,6 +224,10 @@ void sctp_stream_clear(struct sctp_stream *stream)
void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new)
{
struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
sched->unsched_all(stream);
sctp_stream_outq_migrate(stream, new, new->outcnt);
sctp_stream_free(stream);
stream->out = new->out;
@ -94,6 +235,8 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new)
stream->outcnt = new->outcnt;
stream->incnt = new->incnt;
sched->sched_all(stream);
new->out = NULL;
new->in = NULL;
}
@ -270,15 +413,9 @@ int sctp_send_add_streams(struct sctp_association *asoc,
}
if (out) {
struct sctp_stream_out *streamout;
streamout = krealloc(stream->out, outcnt * sizeof(*streamout),
GFP_KERNEL);
if (!streamout)
retval = sctp_stream_alloc_out(stream, outcnt, GFP_KERNEL);
if (retval)
goto out;
memset(streamout + stream->outcnt, 0, out * sizeof(*streamout));
stream->out = streamout;
}
chunk = sctp_make_strreset_addstrm(asoc, out, in);
@ -601,7 +738,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out(
struct sctp_strreset_addstrm *addstrm = param.v;
struct sctp_stream *stream = &asoc->stream;
__u32 result = SCTP_STRRESET_DENIED;
struct sctp_stream_in *streamin;
__u32 request_seq, incnt;
__u16 in, i;
@ -648,13 +784,9 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out(
if (!in || incnt > SCTP_MAX_STREAM)
goto out;
streamin = krealloc(stream->in, incnt * sizeof(*streamin),
GFP_ATOMIC);
if (!streamin)
if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
goto out;
memset(streamin + stream->incnt, 0, in * sizeof(*streamin));
stream->in = streamin;
stream->incnt = incnt;
result = SCTP_STRRESET_PERFORMED;
@ -676,10 +808,10 @@ struct sctp_chunk *sctp_process_strreset_addstrm_in(
struct sctp_strreset_addstrm *addstrm = param.v;
struct sctp_stream *stream = &asoc->stream;
__u32 result = SCTP_STRRESET_DENIED;
struct sctp_stream_out *streamout;
struct sctp_chunk *chunk = NULL;
__u32 request_seq, outcnt;
__u16 out, i;
int ret;
request_seq = ntohl(addstrm->request_seq);
if (TSN_lt(asoc->strreset_inseq, request_seq) ||
@ -708,14 +840,10 @@ struct sctp_chunk *sctp_process_strreset_addstrm_in(
if (!out || outcnt > SCTP_MAX_STREAM)
goto out;
streamout = krealloc(stream->out, outcnt * sizeof(*streamout),
GFP_ATOMIC);
if (!streamout)
ret = sctp_stream_alloc_out(stream, outcnt, GFP_ATOMIC);
if (ret)
goto out;
memset(streamout + stream->outcnt, 0, out * sizeof(*streamout));
stream->out = streamout;
chunk = sctp_make_strreset_addstrm(asoc, out, 0);
if (!chunk)
goto out;

net/sctp/stream_sched.c (new file, 275 lines)

@ -0,0 +1,275 @@
/* SCTP kernel implementation
* (C) Copyright Red Hat Inc. 2017
*
* This file is part of the SCTP kernel implementation
*
* These functions manipulate sctp stream queue/scheduling.
*
* This SCTP implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This SCTP implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
*/
#include <linux/list.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
/* First Come First Serve (a.k.a. FIFO)
* RFC DRAFT ndata Section 3.1
*/
static int sctp_sched_fcfs_set(struct sctp_stream *stream, __u16 sid,
__u16 value, gfp_t gfp)
{
return 0;
}
static int sctp_sched_fcfs_get(struct sctp_stream *stream, __u16 sid,
__u16 *value)
{
*value = 0;
return 0;
}
static int sctp_sched_fcfs_init(struct sctp_stream *stream)
{
return 0;
}
static int sctp_sched_fcfs_init_sid(struct sctp_stream *stream, __u16 sid,
gfp_t gfp)
{
return 0;
}
static void sctp_sched_fcfs_free(struct sctp_stream *stream)
{
}
static void sctp_sched_fcfs_enqueue(struct sctp_outq *q,
struct sctp_datamsg *msg)
{
}
static struct sctp_chunk *sctp_sched_fcfs_dequeue(struct sctp_outq *q)
{
struct sctp_stream *stream = &q->asoc->stream;
struct sctp_chunk *ch = NULL;
struct list_head *entry;
if (list_empty(&q->out_chunk_list))
goto out;
if (stream->out_curr) {
ch = list_entry(stream->out_curr->ext->outq.next,
struct sctp_chunk, stream_list);
} else {
entry = q->out_chunk_list.next;
ch = list_entry(entry, struct sctp_chunk, list);
}
sctp_sched_dequeue_common(q, ch);
out:
return ch;
}
static void sctp_sched_fcfs_dequeue_done(struct sctp_outq *q,
struct sctp_chunk *chunk)
{
}
static void sctp_sched_fcfs_sched_all(struct sctp_stream *stream)
{
}
static void sctp_sched_fcfs_unsched_all(struct sctp_stream *stream)
{
}
static struct sctp_sched_ops sctp_sched_fcfs = {
.set = sctp_sched_fcfs_set,
.get = sctp_sched_fcfs_get,
.init = sctp_sched_fcfs_init,
.init_sid = sctp_sched_fcfs_init_sid,
.free = sctp_sched_fcfs_free,
.enqueue = sctp_sched_fcfs_enqueue,
.dequeue = sctp_sched_fcfs_dequeue,
.dequeue_done = sctp_sched_fcfs_dequeue_done,
.sched_all = sctp_sched_fcfs_sched_all,
.unsched_all = sctp_sched_fcfs_unsched_all,
};
/* API to other parts of the stack */
extern struct sctp_sched_ops sctp_sched_prio;
extern struct sctp_sched_ops sctp_sched_rr;
struct sctp_sched_ops *sctp_sched_ops[] = {
&sctp_sched_fcfs,
&sctp_sched_prio,
&sctp_sched_rr,
};
int sctp_sched_set_sched(struct sctp_association *asoc,
enum sctp_sched_type sched)
{
struct sctp_sched_ops *n = sctp_sched_ops[sched];
struct sctp_sched_ops *old = asoc->outqueue.sched;
struct sctp_datamsg *msg = NULL;
struct sctp_chunk *ch;
int i, ret = 0;
if (old == n)
return ret;
if (sched > SCTP_SS_MAX)
return -EINVAL;
if (old) {
old->free(&asoc->stream);
/* Give the next scheduler a clean slate. */
for (i = 0; i < asoc->stream.outcnt; i++) {
void *p = asoc->stream.out[i].ext;
if (!p)
continue;
p += offsetofend(struct sctp_stream_out_ext, outq);
memset(p, 0, sizeof(struct sctp_stream_out_ext) -
offsetofend(struct sctp_stream_out_ext, outq));
}
}
asoc->outqueue.sched = n;
n->init(&asoc->stream);
for (i = 0; i < asoc->stream.outcnt; i++) {
if (!asoc->stream.out[i].ext)
continue;
ret = n->init_sid(&asoc->stream, i, GFP_KERNEL);
if (ret)
goto err;
}
/* We have to requeue all chunks already queued. */
list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list) {
if (ch->msg == msg)
continue;
msg = ch->msg;
n->enqueue(&asoc->outqueue, msg);
}
return ret;
err:
n->free(&asoc->stream);
asoc->outqueue.sched = &sctp_sched_fcfs; /* Always safe */
return ret;
}
int sctp_sched_get_sched(struct sctp_association *asoc)
{
int i;
for (i = 0; i <= SCTP_SS_MAX; i++)
if (asoc->outqueue.sched == sctp_sched_ops[i])
return i;
return 0;
}
int sctp_sched_set_value(struct sctp_association *asoc, __u16 sid,
__u16 value, gfp_t gfp)
{
if (sid >= asoc->stream.outcnt)
return -EINVAL;
if (!asoc->stream.out[sid].ext) {
int ret;
ret = sctp_stream_init_ext(&asoc->stream, sid);
if (ret)
return ret;
}
return asoc->outqueue.sched->set(&asoc->stream, sid, value, gfp);
}
int sctp_sched_get_value(struct sctp_association *asoc, __u16 sid,
__u16 *value)
{
if (sid >= asoc->stream.outcnt)
return -EINVAL;
if (!asoc->stream.out[sid].ext)
return 0;
return asoc->outqueue.sched->get(&asoc->stream, sid, value);
}
void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch)
{
if (!list_is_last(&ch->frag_list, &ch->msg->chunks)) {
struct sctp_stream_out *sout;
__u16 sid;
/* datamsg is not finished, so save it as the current one,
* in case the application switches scheduler or a higher
* priority stream comes in.
*/
sid = sctp_chunk_stream_no(ch);
sout = &q->asoc->stream.out[sid];
q->asoc->stream.out_curr = sout;
return;
}
q->asoc->stream.out_curr = NULL;
q->sched->dequeue_done(q, ch);
}
/* Auxiliary functions for the schedulers */
void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch)
{
list_del_init(&ch->list);
list_del_init(&ch->stream_list);
q->out_qlen -= ch->skb->len;
}
int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp)
{
struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
INIT_LIST_HEAD(&stream->out[sid].ext->outq);
return sched->init_sid(stream, sid, gfp);
}
struct sctp_sched_ops *sctp_sched_ops_from_stream(struct sctp_stream *stream)
{
struct sctp_association *asoc;
asoc = container_of(stream, struct sctp_association, stream);
return asoc->outqueue.sched;
}


@ -0,0 +1,347 @@
/* SCTP kernel implementation
* (C) Copyright Red Hat Inc. 2017
*
* This file is part of the SCTP kernel implementation
*
* These functions manipulate sctp stream queue/scheduling.
*
* This SCTP implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This SCTP implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
*/
#include <linux/list.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
/* Priority handling
* RFC DRAFT ndata section 3.4
*/
static void sctp_sched_prio_unsched_all(struct sctp_stream *stream);
static struct sctp_stream_priorities *sctp_sched_prio_new_head(
struct sctp_stream *stream, int prio, gfp_t gfp)
{
struct sctp_stream_priorities *p;
p = kmalloc(sizeof(*p), gfp);
if (!p)
return NULL;
INIT_LIST_HEAD(&p->prio_sched);
INIT_LIST_HEAD(&p->active);
p->next = NULL;
p->prio = prio;
return p;
}
static struct sctp_stream_priorities *sctp_sched_prio_get_head(
struct sctp_stream *stream, int prio, gfp_t gfp)
{
struct sctp_stream_priorities *p;
int i;
/* Look into scheduled priorities first, as they are sorted and
* we can find it fast IF it's scheduled.
*/
list_for_each_entry(p, &stream->prio_list, prio_sched) {
if (p->prio == prio)
return p;
if (p->prio > prio)
break;
}
/* No luck. So we search on all streams now. */
for (i = 0; i < stream->outcnt; i++) {
if (!stream->out[i].ext)
continue;
p = stream->out[i].ext->prio_head;
if (!p)
/* Means all other streams won't be initialized
* either.
*/
break;
if (p->prio == prio)
return p;
}
/* If not even there, allocate a new one. */
return sctp_sched_prio_new_head(stream, prio, gfp);
}
static void sctp_sched_prio_next_stream(struct sctp_stream_priorities *p)
{
struct list_head *pos;
pos = p->next->prio_list.next;
if (pos == &p->active)
pos = pos->next;
p->next = list_entry(pos, struct sctp_stream_out_ext, prio_list);
}
static bool sctp_sched_prio_unsched(struct sctp_stream_out_ext *soute)
{
bool scheduled = false;
if (!list_empty(&soute->prio_list)) {
struct sctp_stream_priorities *prio_head = soute->prio_head;
/* Scheduled */
scheduled = true;
if (prio_head->next == soute)
/* Try to move to the next stream */
sctp_sched_prio_next_stream(prio_head);
list_del_init(&soute->prio_list);
/* Also unsched the priority if this was the last stream */
if (list_empty(&prio_head->active)) {
list_del_init(&prio_head->prio_sched);
/* If there is no stream left, clear next */
prio_head->next = NULL;
}
}
return scheduled;
}
static void sctp_sched_prio_sched(struct sctp_stream *stream,
struct sctp_stream_out_ext *soute)
{
struct sctp_stream_priorities *prio, *prio_head;
prio_head = soute->prio_head;
/* Nothing to do if already scheduled */
if (!list_empty(&soute->prio_list))
return;
/* Schedule the stream. If there is a next, we schedule the new
* one before it, so it's the last in round robin order.
* If there isn't, we also have to schedule the priority.
*/
if (prio_head->next) {
list_add(&soute->prio_list, prio_head->next->prio_list.prev);
return;
}
list_add(&soute->prio_list, &prio_head->active);
prio_head->next = soute;
list_for_each_entry(prio, &stream->prio_list, prio_sched) {
if (prio->prio > prio_head->prio) {
list_add(&prio_head->prio_sched, prio->prio_sched.prev);
return;
}
}
list_add_tail(&prio_head->prio_sched, &stream->prio_list);
}
static int sctp_sched_prio_set(struct sctp_stream *stream, __u16 sid,
__u16 prio, gfp_t gfp)
{
struct sctp_stream_out *sout = &stream->out[sid];
struct sctp_stream_out_ext *soute = sout->ext;
struct sctp_stream_priorities *prio_head, *old;
bool reschedule = false;
int i;
prio_head = sctp_sched_prio_get_head(stream, prio, gfp);
if (!prio_head)
return -ENOMEM;
reschedule = sctp_sched_prio_unsched(soute);
old = soute->prio_head;
soute->prio_head = prio_head;
if (reschedule)
sctp_sched_prio_sched(stream, soute);
if (!old)
/* Happens when we set the priority for the first time */
return 0;
for (i = 0; i < stream->outcnt; i++) {
soute = stream->out[i].ext;
if (soute && soute->prio_head == old)
/* It's still in use, nothing else to do here. */
return 0;
}
/* No hits, we are good to free it. */
kfree(old);
return 0;
}
static int sctp_sched_prio_get(struct sctp_stream *stream, __u16 sid,
__u16 *value)
{
*value = stream->out[sid].ext->prio_head->prio;
return 0;
}
static int sctp_sched_prio_init(struct sctp_stream *stream)
{
INIT_LIST_HEAD(&stream->prio_list);
return 0;
}
static int sctp_sched_prio_init_sid(struct sctp_stream *stream, __u16 sid,
gfp_t gfp)
{
INIT_LIST_HEAD(&stream->out[sid].ext->prio_list);
return sctp_sched_prio_set(stream, sid, 0, gfp);
}
static void sctp_sched_prio_free(struct sctp_stream *stream)
{
struct sctp_stream_priorities *prio, *n;
LIST_HEAD(list);
int i;
/* As we don't keep a list of priorities, to avoid multiple
* frees we have to do it in 3 steps:
* 1. unsched everyone, so the lists are free to use in 2.
* 2. build the list of the priorities
* 3. free the list
*/
sctp_sched_prio_unsched_all(stream);
for (i = 0; i < stream->outcnt; i++) {
if (!stream->out[i].ext)
continue;
prio = stream->out[i].ext->prio_head;
if (prio && list_empty(&prio->prio_sched))
list_add(&prio->prio_sched, &list);
}
list_for_each_entry_safe(prio, n, &list, prio_sched) {
list_del_init(&prio->prio_sched);
kfree(prio);
}
}
static void sctp_sched_prio_enqueue(struct sctp_outq *q,
struct sctp_datamsg *msg)
{
struct sctp_stream *stream;
struct sctp_chunk *ch;
__u16 sid;
ch = list_first_entry(&msg->chunks, struct sctp_chunk, frag_list);
sid = sctp_chunk_stream_no(ch);
stream = &q->asoc->stream;
sctp_sched_prio_sched(stream, stream->out[sid].ext);
}
static struct sctp_chunk *sctp_sched_prio_dequeue(struct sctp_outq *q)
{
struct sctp_stream *stream = &q->asoc->stream;
struct sctp_stream_priorities *prio;
struct sctp_stream_out_ext *soute;
struct sctp_chunk *ch = NULL;
/* Bail out quickly if queue is empty */
if (list_empty(&q->out_chunk_list))
goto out;
/* Find which chunk is next. It's easy, it's either the current
* one or the first chunk on the next active stream.
*/
if (stream->out_curr) {
soute = stream->out_curr->ext;
} else {
prio = list_entry(stream->prio_list.next,
struct sctp_stream_priorities, prio_sched);
soute = prio->next;
}
ch = list_entry(soute->outq.next, struct sctp_chunk, stream_list);
sctp_sched_dequeue_common(q, ch);
out:
return ch;
}
static void sctp_sched_prio_dequeue_done(struct sctp_outq *q,
struct sctp_chunk *ch)
{
struct sctp_stream_priorities *prio;
struct sctp_stream_out_ext *soute;
__u16 sid;
/* Last chunk on that msg, move to the next stream on
* this priority.
*/
sid = sctp_chunk_stream_no(ch);
soute = q->asoc->stream.out[sid].ext;
prio = soute->prio_head;
sctp_sched_prio_next_stream(prio);
if (list_empty(&soute->outq))
sctp_sched_prio_unsched(soute);
}
static void sctp_sched_prio_sched_all(struct sctp_stream *stream)
{
struct sctp_association *asoc;
struct sctp_stream_out *sout;
struct sctp_chunk *ch;
asoc = container_of(stream, struct sctp_association, stream);
list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list) {
__u16 sid;
sid = sctp_chunk_stream_no(ch);
sout = &stream->out[sid];
if (sout->ext)
sctp_sched_prio_sched(stream, sout->ext);
}
}
static void sctp_sched_prio_unsched_all(struct sctp_stream *stream)
{
struct sctp_stream_priorities *p, *tmp;
struct sctp_stream_out_ext *soute, *souttmp;
list_for_each_entry_safe(p, tmp, &stream->prio_list, prio_sched)
list_for_each_entry_safe(soute, souttmp, &p->active, prio_list)
sctp_sched_prio_unsched(soute);
}
struct sctp_sched_ops sctp_sched_prio = {
.set = sctp_sched_prio_set,
.get = sctp_sched_prio_get,
.init = sctp_sched_prio_init,
.init_sid = sctp_sched_prio_init_sid,
.free = sctp_sched_prio_free,
.enqueue = sctp_sched_prio_enqueue,
.dequeue = sctp_sched_prio_dequeue,
.dequeue_done = sctp_sched_prio_dequeue_done,
.sched_all = sctp_sched_prio_sched_all,
.unsched_all = sctp_sched_prio_unsched_all,
};
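Reading the scheduler above: sctp_sched_prio_sched() keeps stream->prio_list sorted by ascending prio value and the dequeue path always picks the head, so numerically lower values are served first, and streams that share a value are round-robined through prio_head->next. A small illustrative userspace sketch (again assuming the UAPI additions from this series are visible, an established one-to-one association, and stream ids below the outgoing stream count) that maps two streams to different values via SCTP_STREAM_SCHEDULER_VALUE:

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/sctp.h>

/* Illustrative only: under SCTP_SS_PRIO, chunks queued on a stream with
 * value 1 are dequeued before chunks queued on a stream with value 10,
 * per the sorted insertion shown in sctp_sched_prio_sched() above.
 */
static int set_stream_value(int sd, uint16_t stream, uint16_t value)
{
	struct sctp_stream_value sv;

	memset(&sv, 0, sizeof(sv));
	sv.assoc_id = 0;	/* the single association on a 1-1 socket */
	sv.stream_id = stream;
	sv.stream_value = value;

	return setsockopt(sd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER_VALUE,
			  &sv, sizeof(sv));
}

/* e.g.:  set_stream_value(sd, 0, 1);  set_stream_value(sd, 1, 10); */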

net/sctp/stream_sched_rr.c (new file, 201 lines)

@ -0,0 +1,201 @@
/* SCTP kernel implementation
* (C) Copyright Red Hat Inc. 2017
*
* This file is part of the SCTP kernel implementation
*
* These functions manipulate sctp stream queue/scheduling.
*
* This SCTP implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This SCTP implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
*/
#include <linux/list.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
/* Round-robin handling
* RFC DRAFT ndata section 3.2
*/
static void sctp_sched_rr_unsched_all(struct sctp_stream *stream);
static void sctp_sched_rr_next_stream(struct sctp_stream *stream)
{
struct list_head *pos;
pos = stream->rr_next->rr_list.next;
if (pos == &stream->rr_list)
pos = pos->next;
stream->rr_next = list_entry(pos, struct sctp_stream_out_ext, rr_list);
}
static void sctp_sched_rr_unsched(struct sctp_stream *stream,
struct sctp_stream_out_ext *soute)
{
if (stream->rr_next == soute)
/* Try to move to the next stream */
sctp_sched_rr_next_stream(stream);
list_del_init(&soute->rr_list);
/* If we have no other stream queued, clear next */
if (list_empty(&stream->rr_list))
stream->rr_next = NULL;
}
static void sctp_sched_rr_sched(struct sctp_stream *stream,
struct sctp_stream_out_ext *soute)
{
if (!list_empty(&soute->rr_list))
/* Already scheduled. */
return;
/* Schedule the stream */
list_add_tail(&soute->rr_list, &stream->rr_list);
if (!stream->rr_next)
stream->rr_next = soute;
}
static int sctp_sched_rr_set(struct sctp_stream *stream, __u16 sid,
__u16 prio, gfp_t gfp)
{
return 0;
}
static int sctp_sched_rr_get(struct sctp_stream *stream, __u16 sid,
__u16 *value)
{
return 0;
}
static int sctp_sched_rr_init(struct sctp_stream *stream)
{
INIT_LIST_HEAD(&stream->rr_list);
stream->rr_next = NULL;
return 0;
}
static int sctp_sched_rr_init_sid(struct sctp_stream *stream, __u16 sid,
gfp_t gfp)
{
INIT_LIST_HEAD(&stream->out[sid].ext->rr_list);
return 0;
}
static void sctp_sched_rr_free(struct sctp_stream *stream)
{
sctp_sched_rr_unsched_all(stream);
}
static void sctp_sched_rr_enqueue(struct sctp_outq *q,
struct sctp_datamsg *msg)
{
struct sctp_stream *stream;
struct sctp_chunk *ch;
__u16 sid;
ch = list_first_entry(&msg->chunks, struct sctp_chunk, frag_list);
sid = sctp_chunk_stream_no(ch);
stream = &q->asoc->stream;
sctp_sched_rr_sched(stream, stream->out[sid].ext);
}
static struct sctp_chunk *sctp_sched_rr_dequeue(struct sctp_outq *q)
{
struct sctp_stream *stream = &q->asoc->stream;
struct sctp_stream_out_ext *soute;
struct sctp_chunk *ch = NULL;
/* Bail out quickly if queue is empty */
if (list_empty(&q->out_chunk_list))
goto out;
/* Find which chunk is next */
if (stream->out_curr)
soute = stream->out_curr->ext;
else
soute = stream->rr_next;
ch = list_entry(soute->outq.next, struct sctp_chunk, stream_list);
sctp_sched_dequeue_common(q, ch);
out:
return ch;
}
static void sctp_sched_rr_dequeue_done(struct sctp_outq *q,
struct sctp_chunk *ch)
{
struct sctp_stream_out_ext *soute;
__u16 sid;
/* Last chunk on that msg, move to the next stream */
sid = sctp_chunk_stream_no(ch);
soute = q->asoc->stream.out[sid].ext;
sctp_sched_rr_next_stream(&q->asoc->stream);
if (list_empty(&soute->outq))
sctp_sched_rr_unsched(&q->asoc->stream, soute);
}
static void sctp_sched_rr_sched_all(struct sctp_stream *stream)
{
struct sctp_association *asoc;
struct sctp_stream_out_ext *soute;
struct sctp_chunk *ch;
asoc = container_of(stream, struct sctp_association, stream);
list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list) {
__u16 sid;
sid = sctp_chunk_stream_no(ch);
soute = stream->out[sid].ext;
if (soute)
sctp_sched_rr_sched(stream, soute);
}
}
static void sctp_sched_rr_unsched_all(struct sctp_stream *stream)
{
struct sctp_stream_out_ext *soute, *tmp;
list_for_each_entry_safe(soute, tmp, &stream->rr_list, rr_list)
sctp_sched_rr_unsched(stream, soute);
}
struct sctp_sched_ops sctp_sched_rr = {
.set = sctp_sched_rr_set,
.get = sctp_sched_rr_get,
.init = sctp_sched_rr_init,
.init_sid = sctp_sched_rr_init_sid,
.free = sctp_sched_rr_free,
.enqueue = sctp_sched_rr_enqueue,
.dequeue = sctp_sched_rr_dequeue,
.dequeue_done = sctp_sched_rr_dequeue_done,
.sched_all = sctp_sched_rr_sched_all,
.unsched_all = sctp_sched_rr_unsched_all,
};