diff --git a/server/broadcast.go b/server/broadcast.go
index a3e8cf64b..5e9c13468 100755
--- a/server/broadcast.go
+++ b/server/broadcast.go
@@ -453,7 +453,7 @@ func (bsm *BroadcastSessionsManager) shouldSkipVerification(sessions []*Broadcas
 	return common.RandomUintUnder(bsm.VerificationFreq) != 0
 }
 
-func NewSessionManager(ctx context.Context, node *core.LivepeerNode, params *core.StreamParameters, sel BroadcastSessionsSelectorFactory) *BroadcastSessionsManager {
+func NewSessionManager(ctx context.Context, node *core.LivepeerNode, params *core.StreamParameters) *BroadcastSessionsManager {
 	if node.Capabilities != nil {
 		params.Capabilities.SetMinVersionConstraint(node.Capabilities.MinVersionConstraint())
 	}
diff --git a/server/broadcast_test.go b/server/broadcast_test.go
index 372d25205..21f11cb18 100644
--- a/server/broadcast_test.go
+++ b/server/broadcast_test.go
@@ -65,10 +65,6 @@ func StubBroadcastSession(transcoder string) *BroadcastSession {
 	}
 }
 
-func selFactoryEmpty() BroadcastSessionsSelector {
-	return &LIFOSelector{}
-}
-
 func bsmWithSessList(sessList []*BroadcastSession) *BroadcastSessionsManager {
 	return bsmWithSessListExt(sessList, nil, false)
 }
@@ -299,7 +295,7 @@ func TestNewSessionManager(t *testing.T) {
 
 	// Check empty pool produces expected numOrchs
-	sess := NewSessionManager(context.TODO(), n, params, selFactoryEmpty)
+	sess := NewSessionManager(context.TODO(), n, params)
 	assert.Equal(0, sess.trustedPool.numOrchs)
 	assert.Equal(0, sess.untrustedPool.numOrchs)
 
@@ -308,7 +304,7 @@
 	n.OrchestratorPool = sd
 	max := int(common.HTTPTimeout.Seconds()/SegLen.Seconds()) * 2
 	for i := 0; i < 10; i++ {
-		sess = NewSessionManager(context.TODO(), n, params, selFactoryEmpty)
+		sess = NewSessionManager(context.TODO(), n, params)
 		if i < max {
 			assert.Equal(i, sess.trustedPool.numOrchs)
 		} else {
diff --git a/server/mediaserver.go b/server/mediaserver.go
index 975aef7fb..6b1b5ea69 100644
--- a/server/mediaserver.go
+++ b/server/mediaserver.go
@@ -524,17 +524,8 @@ func (s *LivepeerServer) registerConnection(ctx context.Context, rtmpStrm stream
 	// do not obtain this lock again while initializing channel is open, it will cause deadlock if other goroutine already obtained the lock and called getActiveRtmpConnectionUnsafe()
 	s.connectionLock.Unlock()
 
-	// initialize session manager
-	var stakeRdr stakeReader
-	if s.LivepeerNode.Eth != nil {
-		stakeRdr = &storeStakeReader{store: s.LivepeerNode.Database}
-	}
-	selFactory := func() BroadcastSessionsSelector {
-		return NewMinLSSelector(stakeRdr, SELECTOR_LATENCY_SCORE_THRESHOLD, s.LivepeerNode.SelectionAlgorithm, s.LivepeerNode.OrchPerfScore)
-	}
-	// safe, because other goroutines should be waiting on initializing channel
-	cxn.sessManager = NewSessionManager(ctx, s.LivepeerNode, params, selFactory)
+	cxn.sessManager = NewSessionManager(ctx, s.LivepeerNode, params)
 
 	// populate fields and signal initializing channel
 	s.serverLock.Lock()