@@ -691,30 +691,32 @@ static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
 				 struct net_device *netdev)
 {
 	unsigned int fn = PCI_FUNC(dev->pdev->devfn);
+	unsigned long flags;
 
 	if (fn >= MLX5_MAX_PORTS)
 		return;
 
-	spin_lock(&lag_lock);
+	spin_lock_irqsave(&lag_lock, flags);
 	ldev->pf[fn].netdev = netdev;
 	ldev->tracker.netdev_state[fn].link_up = 0;
 	ldev->tracker.netdev_state[fn].tx_enabled = 0;
-	spin_unlock(&lag_lock);
+	spin_unlock_irqrestore(&lag_lock, flags);
 }
 
 static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
 				    struct net_device *netdev)
 {
+	unsigned long flags;
 	int i;
 
-	spin_lock(&lag_lock);
+	spin_lock_irqsave(&lag_lock, flags);
 	for (i = 0; i < MLX5_MAX_PORTS; i++) {
 		if (ldev->pf[i].netdev == netdev) {
 			ldev->pf[i].netdev = NULL;
 			break;
 		}
 	}
-	spin_unlock(&lag_lock);
+	spin_unlock_irqrestore(&lag_lock, flags);
 }
 
 static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
@@ -855,12 +857,13 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
 bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
 {
 	struct mlx5_lag *ldev;
+	unsigned long flags;
 	bool res;
 
-	spin_lock(&lag_lock);
+	spin_lock_irqsave(&lag_lock, flags);
 	ldev = mlx5_lag_dev(dev);
 	res = ldev && __mlx5_lag_is_roce(ldev);
-	spin_unlock(&lag_lock);
+	spin_unlock_irqrestore(&lag_lock, flags);
 
 	return res;
 }
@@ -869,12 +872,13 @@ EXPORT_SYMBOL(mlx5_lag_is_roce);
 bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
 {
 	struct mlx5_lag *ldev;
+	unsigned long flags;
 	bool res;
 
-	spin_lock(&lag_lock);
+	spin_lock_irqsave(&lag_lock, flags);
 	ldev = mlx5_lag_dev(dev);
 	res = ldev && __mlx5_lag_is_active(ldev);
-	spin_unlock(&lag_lock);
+	spin_unlock_irqrestore(&lag_lock, flags);
 
 	return res;
 }
@@ -883,13 +887,14 @@ EXPORT_SYMBOL(mlx5_lag_is_active);
 bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
 {
 	struct mlx5_lag *ldev;
+	unsigned long flags;
 	bool res;
 
-	spin_lock(&lag_lock);
+	spin_lock_irqsave(&lag_lock, flags);
 	ldev = mlx5_lag_dev(dev);
 	res = ldev && __mlx5_lag_is_active(ldev) &&
 		dev == ldev->pf[MLX5_LAG_P1].dev;
-	spin_unlock(&lag_lock);
+	spin_unlock_irqrestore(&lag_lock, flags);
 
 	return res;
 }
@@ -898,12 +903,13 @@ EXPORT_SYMBOL(mlx5_lag_is_master);
 bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
 {
 	struct mlx5_lag *ldev;
+	unsigned long flags;
 	bool res;
 
-	spin_lock(&lag_lock);
+	spin_lock_irqsave(&lag_lock, flags);
 	ldev = mlx5_lag_dev(dev);
 	res = ldev && __mlx5_lag_is_sriov(ldev);
-	spin_unlock(&lag_lock);
+	spin_unlock_irqrestore(&lag_lock, flags);
 
 	return res;
 }
@@ -912,12 +918,13 @@ EXPORT_SYMBOL(mlx5_lag_is_sriov);
 bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev)
 {
 	struct mlx5_lag *ldev;
+	unsigned long flags;
 	bool res;
 
-	spin_lock(&lag_lock);
+	spin_lock_irqsave(&lag_lock, flags);
 	ldev = mlx5_lag_dev(dev);
 	res = ldev && __mlx5_lag_is_sriov(ldev) && ldev->shared_fdb;
-	spin_unlock(&lag_lock);
+	spin_unlock_irqrestore(&lag_lock, flags);
 
 	return res;
 }
@@ -965,8 +972,9 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
 {
 	struct net_device *ndev = NULL;
 	struct mlx5_lag *ldev;
+	unsigned long flags;
 
-	spin_lock(&lag_lock);
+	spin_lock_irqsave(&lag_lock, flags);
 	ldev = mlx5_lag_dev(dev);
 
 	if (!(ldev && __mlx5_lag_is_roce(ldev)))
@@ -983,7 +991,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
 		dev_hold(ndev);
 
 unlock:
-	spin_unlock(&lag_lock);
+	spin_unlock_irqrestore(&lag_lock, flags);
 
 	return ndev;
 }
@@ -993,9 +1001,10 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
 			   struct net_device *slave)
 {
 	struct mlx5_lag *ldev;
+	unsigned long flags;
 	u8 port = 0;
 
-	spin_lock(&lag_lock);
+	spin_lock_irqsave(&lag_lock, flags);
 	ldev = mlx5_lag_dev(dev);
 	if (!(ldev && __mlx5_lag_is_roce(ldev)))
 		goto unlock;
@@ -1008,7 +1017,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
 	port = ldev->v2p_map[port];
 
 unlock:
-	spin_unlock(&lag_lock);
+	spin_unlock_irqrestore(&lag_lock, flags);
 	return port;
 }
 EXPORT_SYMBOL(mlx5_lag_get_slave_port);
@@ -1017,8 +1026,9 @@ struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
 {
 	struct mlx5_core_dev *peer_dev = NULL;
 	struct mlx5_lag *ldev;
+	unsigned long flags;
 
-	spin_lock(&lag_lock);
+	spin_lock_irqsave(&lag_lock, flags);
 	ldev = mlx5_lag_dev(dev);
 	if (!ldev)
 		goto unlock;
@@ -1028,7 +1038,7 @@ struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
 			   ldev->pf[MLX5_LAG_P1].dev;
 
 unlock:
-	spin_unlock(&lag_lock);
+	spin_unlock_irqrestore(&lag_lock, flags);
 	return peer_dev;
 }
 EXPORT_SYMBOL(mlx5_lag_get_peer_mdev);
@@ -1041,6 +1051,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 	int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
 	struct mlx5_core_dev *mdev[MLX5_MAX_PORTS];
 	struct mlx5_lag *ldev;
+	unsigned long flags;
 	int num_ports;
 	int ret, i, j;
 	void *out;
@@ -1051,7 +1062,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 
 	memset(values, 0, sizeof(*values) * num_counters);
 
-	spin_lock(&lag_lock);
+	spin_lock_irqsave(&lag_lock, flags);
 	ldev = mlx5_lag_dev(dev);
 	if (ldev && __mlx5_lag_is_active(ldev)) {
 		num_ports = MLX5_MAX_PORTS;
@@ -1061,7 +1072,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 		num_ports = 1;
 		mdev[MLX5_LAG_P1] = dev;
 	}
-	spin_unlock(&lag_lock);
+	spin_unlock_irqrestore(&lag_lock, flags);
 
 	for (i = 0; i < num_ports; ++i) {
 		u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};
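Note: every hunk above makes the same mechanical change, replacing spin_lock()/spin_unlock() on lag_lock with the IRQ-safe spin_lock_irqsave()/spin_unlock_irqrestore() pair and adding a local "unsigned long flags" to hold the saved interrupt state. As a minimal sketch of that pattern only (the lock and data names below are hypothetical and not taken from the mlx5 driver):

#include <linux/spinlock.h>

/* Hypothetical example state, not part of the mlx5 lag code. */
static DEFINE_SPINLOCK(example_lock);
static int example_state;

static void example_update(int val)
{
	unsigned long flags;	/* the caller's interrupt state is saved here */

	/*
	 * Disable local interrupts while taking the lock, so the same
	 * lock can also be acquired safely from interrupt or softirq
	 * context without risking a deadlock against this path.
	 */
	spin_lock_irqsave(&example_lock, flags);
	example_state = val;
	spin_unlock_irqrestore(&example_lock, flags);
}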