When utilizing SRS Edge to forward traffic to the origin cluster, the traffic distribution is highly uneven. #4196
Unanswered
nighttidesy asked this question in Q&A
Replies: 2 comments
-
Did @winlinvip design it this way intentionally? If the traffic is not balanced, the origin server can easily hit its performance limit. Also, can I put a load balancer in front of the origin cluster, have it distribute traffic across the origin nodes, and then configure the edge cluster to forward traffic to the load balancer's address?
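For clarity, a minimal sketch of the setup being proposed, with all names hypothetical; whether SRS edge-to-origin traffic behaves correctly behind a single virtual IP is exactly what is being asked here. A Service in front of the origin pods would spread new TCP connections across the nodes:

apiVersion: v1
kind: Service
metadata:
  name: srs-origin-lb   # hypothetical load-balancer Service
spec:
  selector:
    app: srs-origin
  ports:
    - port: 1935
      protocol: TCP
      targetPort: 1935

and the edge cluster would then point at that single address instead of listing every origin pod:

vhost defaultVhost {
    cluster {
        mode remote;
        origin srs-origin-lb:1935;   # hypothetical load-balancer address
    }
}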
-
Please use the new origin server, the SRS Proxy; see https://ossrs.io/lts/en-us/docs/v7/doc/origin-cluster
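For context, a minimal sketch of what that would look like on the edge side, assuming the SRS Proxy is deployed at a hypothetical address srs-proxy:1935; per the linked docs, the proxy sits in front of the origin servers and routes each stream to the origin that holds it, so the edges only need the proxy's address:

vhost defaultVhost {
    cluster {
        mode remote;
        origin srs-proxy:1935;   # hypothetical SRS Proxy address; see linked docs
    }
}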
-
Describe the bug
When using SRS Edge with an SRS Origin cluster, most of the streams pushed from SRS Edge go to the first node of the origin cluster, while the other nodes receive significantly less traffic, resulting in an imbalanced distribution.
Version
srs4
To Reproduce
[root@k8s-ops srs-cluster]# cat srs-edge.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: srs-edge-config
data:
  srs.conf: |-
    listen 1935;
    max_connections 1000;
    daemon off;
    # Enable smooth exit feature.
    grace_start_wait 700;
    grace_final_wait 800;
    force_grace_quit on;
    http_api {
        enabled on;
        listen 1985;
    }
    http_server {
        enabled on;
        listen 8080;
    }
    vhost defaultVhost {
        cluster {
            mode remote;
            origin srs-origin-0.socs srs-origin-1.socs srs-origin-2.socs;
            token_traverse on;
        }
        http_remux {
            enabled on;
            mount [vhost]/[app]/[stream].flv;
            hstrs on;
        }
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: srs-edge-deploy
  labels:
    app: srs-edge
spec:
  replicas: 3
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: srs-edge
  template:
    metadata:
      annotations:
        version/config: "20231228201317"
      labels:
        app: srs-edge
    spec:
      volumes:
        - name: config-volume
          configMap:
            name: srs-edge-config
        - name: av-cfs
          persistentVolumeClaim:
            claimName: av-cfs-pvc
      containers:
        - name: srs
          image: ossrs/srs:4
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 1935
            - containerPort: 1985
            - containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
  name: srs-edge-service
spec:
  type: NodePort
  selector:
    app: srs-edge
  ports:
    # Port names are required when a Service exposes multiple ports.
    - name: rtmp
      port: 1935
      protocol: TCP
      targetPort: 1935
      nodePort: 30228
    - name: api
      port: 1985
      protocol: TCP
      targetPort: 1985
      nodePort: 30229
    - name: http
      port: 8080
      protocol: TCP
      targetPort: 8080
Configure the SRS origin settings:
[root@k8s-ops srs-cluster]# cat srs-origin.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: srs-origin-config
data:
  srs.conf: |-
    listen 1935 {
        idle_timeout 600;
    }
    max_connections 1000;
    daemon off;
    srs_log_tank console;
    srs_log_file ./objs/srs.log;
    http_api {
        enabled on;
        listen 1985;
    }
    http_server {
        enabled on;
        listen 8080;
        dir ./objs/nginx/html;
    }
    vhost defaultVhost {
        cluster {
            origin_cluster on;
            coworkers srs-origin-0.socs:1985 srs-origin-1.socs:1985 srs-origin-2.socs:1985;
        }
        # Configure HTTP-FLV settings.
        http_remux {
            enabled on;
            mount [vhost]/[app]/[stream].flv;
            hstrs on;
        }
        # Configure DVR settings.
        dvr {
            enabled on;
            dvr_path /app/nfs/livevideo/srs/[app]/[stream]_[timestamp].flv;
            dvr_plan segment;
            # Split video files by time.
            dvr_duration 600;
            dvr_wait_keyframe on;
        }
        # HTTP callbacks.
        http_hooks {
            enabled on;
            # Token verification interface on the AVS management side,
            # called when the client starts publishing.
            on_publish http://avs-admin:9102/v1/token/check;
            # Token verification interface, called when the client starts playing.
            on_play http://avs-admin:9102/v1/token/check;
        }
    }
---
apiVersion: v1
kind: Service
metadata:
  name: socs
spec:
  clusterIP: None
  selector:
    app: srs-origin
  ports:
    - port: 1935
      protocol: TCP
      targetPort: 1935
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: srs-origin
  labels:
    app: srs-origin
spec:
  serviceName: "socs"
  replicas: 3
  selector:
    matchLabels:
      app: srs-origin
  template:
    metadata:
      labels:
        app: srs-origin
    spec:
      volumes:
        - name: cache-volume
          persistentVolumeClaim:
            claimName: av2-srs-pvc
        - name: config-volume
          configMap:
            name: srs-origin-config
        - name: av-cfs
          persistentVolumeClaim:
            claimName: av-cfs-pvc
      containers:
        - name: srs
          image: ossrs/srs:4
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 1935
            - containerPort: 1985
            - containerPort: 8080
          volumeMounts:
            - name: cache-volume
              mountPath: /usr/local/srs/objs/nginx/html
              readOnly: false
            - name: config-volume
              mountPath: /usr/local/srs/conf
            - name: av-cfs
              mountPath: /app/nfs/
SRS currently does not support cluster-level APIs, so to retrieve live-stream information from all origin servers, each origin server's API has to be called in turn and the results aggregated (see the sketch after the service definitions below). A NodePort Service per pod exposes each origin's API:
apiVersion: v1
kind: Service
metadata:
  name: srs-api-service-0
spec:
  type: NodePort
  selector:
    #app: srs-origin
    statefulset.kubernetes.io/pod-name: srs-origin-0
  ports:
    - port: 1985
      protocol: TCP
      targetPort: 1985
---
apiVersion: v1
kind: Service
metadata:
  name: srs-api-service-1
spec:
  type: NodePort
  selector:
    #app: srs-origin
    statefulset.kubernetes.io/pod-name: srs-origin-1
  ports:
    - port: 1985
      protocol: TCP
      targetPort: 1985
---
apiVersion: v1
kind: Service
metadata:
  name: srs-api-service-2
spec:
  type: NodePort
  selector:
    #app: srs-origin
    statefulset.kubernetes.io/pod-name: srs-origin-2
  ports:
    - port: 1985
      protocol: TCP
      targetPort: 1985
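For illustration, a minimal sketch of that aggregation as a shell loop; the node address and nodePort values are hypothetical (the Services above let Kubernetes assign them), and /api/v1/streams is the SRS API that lists the streams on a single server:

# Hypothetical node address and assigned nodePorts for srs-api-service-0/1/2.
NODE=192.168.0.10
for PORT in 30281 30282 30283; do
    # Each call returns the streams known to one origin server only.
    curl -s "http://$NODE:$PORT/api/v1/streams"
done | jq -s 'map(.streams) | add'   # merge the three per-server lists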
Could you please explain the reason for this? Is it because the edge does not support round-robin forwarding of traffic to the origin by design?