// SPDX-License-Identifier: GPL-2.0
/* Marvell MACSEC hardware offload driver
 *
 * Copyright (C) 2022 Marvell.
 */

#include <linux/rtnetlink.h>
#include <linux/bitfield.h>
#include <net/macsec.h>
#include "otx2_common.h"

#define MCS_TCAM0_MAC_SA_MASK		GENMASK_ULL(63, 48)
#define MCS_TCAM1_MAC_SA_MASK		GENMASK_ULL(31, 0)
#define MCS_TCAM1_ETYPE_MASK		GENMASK_ULL(47, 32)

#define MCS_SA_MAP_MEM_SA_USE		BIT_ULL(9)

#define MCS_RX_SECY_PLCY_RW_MASK	GENMASK_ULL(49, 18)
#define MCS_RX_SECY_PLCY_RP		BIT_ULL(17)
#define MCS_RX_SECY_PLCY_AUTH_ENA	BIT_ULL(16)
#define MCS_RX_SECY_PLCY_CIP		GENMASK_ULL(8, 5)
#define MCS_RX_SECY_PLCY_VAL		GENMASK_ULL(2, 1)
#define MCS_RX_SECY_PLCY_ENA		BIT_ULL(0)

#define MCS_TX_SECY_PLCY_MTU		GENMASK_ULL(43, 28)
#define MCS_TX_SECY_PLCY_ST_TCI		GENMASK_ULL(27, 22)
#define MCS_TX_SECY_PLCY_ST_OFFSET	GENMASK_ULL(21, 15)
#define MCS_TX_SECY_PLCY_INS_MODE	BIT_ULL(14)
#define MCS_TX_SECY_PLCY_AUTH_ENA	BIT_ULL(13)
#define MCS_TX_SECY_PLCY_CIP		GENMASK_ULL(5, 2)
#define MCS_TX_SECY_PLCY_PROTECT	BIT_ULL(1)
#define MCS_TX_SECY_PLCY_ENA		BIT_ULL(0)

#define MCS_GCM_AES_128			0
#define MCS_GCM_AES_256			1
#define MCS_GCM_AES_XPN_128		2
#define MCS_GCM_AES_XPN_256		3

#define MCS_TCI_ES			0x40 /* end station */
#define MCS_TCI_SC			0x20 /* SCI present */
#define MCS_TCI_SCB			0x10 /* epon */
#define MCS_TCI_E			0x08 /* encryption */
#define MCS_TCI_C			0x04 /* changed text */

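/* The two lookup helpers below map the core MACsec objects (SecY for
 * TX, SecY plus RX SC for RX) to the driver's per-SC contexts, which
 * carry the hardware resource ids programmed for them.
 */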
static struct cn10k_mcs_txsc *cn10k_mcs_get_txsc(struct cn10k_mcs_cfg *cfg,
						 struct macsec_secy *secy)
{
	struct cn10k_mcs_txsc *txsc;

	list_for_each_entry(txsc, &cfg->txsc_list, entry) {
		if (txsc->sw_secy == secy)
			return txsc;
	}

	return NULL;
}

static struct cn10k_mcs_rxsc *cn10k_mcs_get_rxsc(struct cn10k_mcs_cfg *cfg,
						 struct macsec_secy *secy,
						 struct macsec_rx_sc *rx_sc)
{
	struct cn10k_mcs_rxsc *rxsc;

	list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
		if (rxsc->sw_rxsc == rx_sc && rxsc->sw_secy == secy)
			return rxsc;
	}

	return NULL;
}

static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
{
	switch (rsrc_type) {
	case MCS_RSRC_TYPE_FLOWID:
		return "FLOW";
	case MCS_RSRC_TYPE_SC:
		return "SC";
	case MCS_RSRC_TYPE_SECY:
		return "SECY";
	case MCS_RSRC_TYPE_SA:
		return "SA";
	default:
		return "Unknown";
	}
}

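/* Request one hardware resource of @type (flow id, SC, SecY or SA) in
 * direction @dir from the AF (admin function) over the mailbox. On
 * success the hardware index picked by the AF is returned through
 * @rsrc_id.
 */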
static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
				enum mcs_rsrc_type type, u16 *rsrc_id)
{
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_alloc_rsrc_req *req;
	struct mcs_alloc_rsrc_rsp *rsp;
	int ret = -ENOMEM;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_alloc_resources(mbox);
	if (!req)
		goto fail;

	req->rsrc_type = type;
	req->rsrc_cnt  = 1;
	req->dir = dir;

	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_alloc_rsrc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
							     0, &req->hdr);
	if (IS_ERR(rsp) || req->rsrc_cnt != rsp->rsrc_cnt ||
	    req->rsrc_type != rsp->rsrc_type || req->dir != rsp->dir) {
		ret = -EINVAL;
		goto fail;
	}

	switch (rsp->rsrc_type) {
	case MCS_RSRC_TYPE_FLOWID:
		*rsrc_id = rsp->flow_ids[0];
		break;
	case MCS_RSRC_TYPE_SC:
		*rsrc_id = rsp->sc_ids[0];
		break;
	case MCS_RSRC_TYPE_SECY:
		*rsrc_id = rsp->secy_ids[0];
		break;
	case MCS_RSRC_TYPE_SA:
		*rsrc_id = rsp->sa_ids[0];
		break;
	default:
		ret = -EINVAL;
		goto fail;
	}

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	dev_err(pfvf->dev, "Failed to allocate %s %s resource\n",
		dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
	mutex_unlock(&mbox->lock);
	return ret;
}

static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
				enum mcs_rsrc_type type, u16 hw_rsrc_id,
				bool all)
{
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_free_rsrc_req *req;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_free_resources(mbox);
	if (!req)
		goto fail;

	req->rsrc_id = hw_rsrc_id;
	req->rsrc_type = type;
	req->dir = dir;
	if (all)
		req->all = 1;

	if (otx2_sync_mbox_msg(&pfvf->mbox))
		goto fail;

	mutex_unlock(&mbox->lock);

	return;
fail:
	dev_err(pfvf->dev, "Failed to free %s %s resource\n",
		dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
	mutex_unlock(&mbox->lock);
}

static int cn10k_mcs_alloc_txsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
{
	return cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id);
}

static int cn10k_mcs_alloc_rxsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
{
	return cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id);
}

static void cn10k_mcs_free_txsa(struct otx2_nic *pfvf, u16 hw_sa_id)
{
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
}

static void cn10k_mcs_free_rxsa(struct otx2_nic *pfvf, u16 hw_sa_id)
{
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
}

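/* Compose and write the 64-bit RX SecY policy word: replay window and
 * replay protection, frame validation mode, the cipher (AES-GCM-128,
 * the only key length accepted at SecY creation) and the enable bit.
 */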
static int cn10k_mcs_write_rx_secy(struct otx2_nic *pfvf,
				   struct macsec_secy *secy, u8 hw_secy_id)
{
	struct mcs_secy_plcy_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 policy;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	policy = FIELD_PREP(MCS_RX_SECY_PLCY_RW_MASK, secy->replay_window);
	if (secy->replay_protect)
		policy |= MCS_RX_SECY_PLCY_RP;

	policy |= MCS_RX_SECY_PLCY_AUTH_ENA;
	policy |= FIELD_PREP(MCS_RX_SECY_PLCY_CIP, MCS_GCM_AES_128);
	policy |= FIELD_PREP(MCS_RX_SECY_PLCY_VAL, secy->validate_frames);

	policy |= MCS_RX_SECY_PLCY_ENA;

	req->plcy = policy;
	req->secy_id = hw_secy_id;
	req->dir = MCS_RX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

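/* Program an RX TCAM flow entry for this SC. All fields are masked
 * except the EtherType, which is matched against ETH_P_MACSEC
 * (0x88E5), so MACsec-tagged frames are steered into the MACsec
 * pipeline for this SecY/SC.
 */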
static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
				     struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
{
	struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
	struct mcs_flowid_entry_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
	req->mask[1] = ~0ULL;
	req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK;

	req->mask[0] = ~0ULL;
	req->mask[2] = ~0ULL;
	req->mask[3] = ~0ULL;

	req->flow_id = rxsc->hw_flow_id;
	req->secy_id = hw_secy_id;
	req->sc_id = rxsc->hw_sc_id;
	req->dir = MCS_RX;

	if (sw_rx_sc->active)
		req->ena = 1;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

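/* Bind the SC's SCI to its hardware SC index through the RX SC CAM,
 * so received frames carrying this SCI are associated with the right
 * hardware SC.
 */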
static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf,
				  struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
{
	struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
	struct mcs_rx_sc_cam_write_req *sc_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox);
	if (!sc_req) {
		ret = -ENOMEM;
		goto fail;
	}

	sc_req->sci = (__force u64)cpu_to_be64((__force u64)sw_rx_sc->sci);
	sc_req->sc_id = rxsc->hw_sc_id;
	sc_req->secy_id = hw_secy_id;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

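/* Write the RX SA key and the SC-to-SA (AN) mapping for one SA. The
 * two mailbox messages are queued together and sent in a single sync
 * call; if allocating the second message fails, the mailbox is reset
 * to drop the first one.
 */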
static int cn10k_mcs_write_rx_sa_plcy(struct otx2_nic *pfvf,
				      struct macsec_secy *secy,
				      struct cn10k_mcs_rxsc *rxsc,
				      u8 assoc_num, bool sa_in_use)
{
	unsigned char *src = rxsc->sa_key[assoc_num];
	struct mcs_sa_plcy_write_req *plcy_req;
	struct mcs_rx_sc_sa_map *map_req;
	struct mbox *mbox = &pfvf->mbox;
	u8 reg, key_len;
	int ret;

	mutex_lock(&mbox->lock);

	plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
	if (!plcy_req) {
		ret = -ENOMEM;
		goto fail;
	}

	map_req = otx2_mbox_alloc_msg_mcs_rx_sc_sa_map_write(mbox);
	if (!map_req) {
		otx2_mbox_reset(&mbox->mbox, 0);
		ret = -ENOMEM;
		goto fail;
	}

	for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
		memcpy((u8 *)&plcy_req->plcy[0][reg],
		       (src + reg * 8), 8);
		reg++;
	}

	plcy_req->sa_index[0] = rxsc->hw_sa_id[assoc_num];
	plcy_req->sa_cnt = 1;
	plcy_req->dir = MCS_RX;

	map_req->sa_index = rxsc->hw_sa_id[assoc_num];
	map_req->sa_in_use = sa_in_use;
	map_req->sc_id = rxsc->hw_sc_id;
	map_req->an = assoc_num;

	/* Send two messages together */
	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_rx_sa_pn(struct otx2_nic *pfvf,
				    struct cn10k_mcs_rxsc *rxsc,
				    u8 assoc_num, u64 next_pn)
{
	struct mcs_pn_table_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->pn_id = rxsc->hw_sa_id[assoc_num];
	req->next_pn = next_pn;
	req->dir = MCS_RX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

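/* Compose and write the 64-bit TX SecY policy word: the SecTag TCI
 * bits derived from the software TX SC (SC/ES/SCB, plus E and C when
 * encrypting), the SecTag insertion offset, MTU, cipher and the
 * protect/enable bits.
 */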
static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   struct cn10k_mcs_txsc *txsc)
{
	struct mcs_secy_plcy_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	struct macsec_tx_sc *sw_tx_sc;
	/* Insert SecTag after the 12-byte Ethernet header (DA + SA) */
	u8 tag_offset = 12;
	u8 sectag_tci = 0;
	u64 policy;
	int ret;

	sw_tx_sc = &secy->tx_sc;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	if (sw_tx_sc->send_sci) {
		sectag_tci |= MCS_TCI_SC;
	} else {
		if (sw_tx_sc->end_station)
			sectag_tci |= MCS_TCI_ES;
		if (sw_tx_sc->scb)
			sectag_tci |= MCS_TCI_SCB;
	}

	if (sw_tx_sc->encrypt)
		sectag_tci |= (MCS_TCI_E | MCS_TCI_C);

	policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
	/* Write the SecTag TCI, excluding the AN bits (1..0) */
	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
	policy |= MCS_TX_SECY_PLCY_INS_MODE;
	policy |= MCS_TX_SECY_PLCY_AUTH_ENA;
	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_CIP, MCS_GCM_AES_128);

	if (secy->protect_frames)
		policy |= MCS_TX_SECY_PLCY_PROTECT;

	/* If the encoding SA does not exist or is not active, and
	 * protect_frames is not set, then frames can be sent out as is.
	 * Hence enable the policy irrespective of the SecY being
	 * operational when protect_frames is off.
	 */
	if (!secy->protect_frames || secy->operational)
		policy |= MCS_TX_SECY_PLCY_ENA;

	req->plcy = policy;
	req->secy_id = txsc->hw_secy_id_tx;
	req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_tx_flowid(struct otx2_nic *pfvf,
				     struct macsec_secy *secy,
				     struct cn10k_mcs_txsc *txsc)
{
	struct mcs_flowid_entry_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 mac_sa;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	mac_sa = ether_addr_to_u64(secy->netdev->dev_addr);

	req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_SA_MASK, mac_sa);
	req->data[1] = FIELD_PREP(MCS_TCAM1_MAC_SA_MASK, mac_sa >> 16);

	req->mask[0] = ~0ULL;
	req->mask[0] &= ~MCS_TCAM0_MAC_SA_MASK;

	req->mask[1] = ~0ULL;
	req->mask[1] &= ~MCS_TCAM1_MAC_SA_MASK;

	req->mask[2] = ~0ULL;
	req->mask[3] = ~0ULL;

	req->flow_id = txsc->hw_flow_id;
	req->secy_id = txsc->hw_secy_id_tx;
	req->sc_id = txsc->hw_sc_id;
	req->sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
	req->dir = MCS_TX;
	/* This can be enabled since the stack transmits packets only when the interface is up */
	req->ena = 1;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_link_tx_sa2sc(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   struct cn10k_mcs_txsc *txsc,
				   u8 sa_num, bool sa_active)
{
	struct mcs_tx_sc_sa_map *map_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	/* Out of all the SAs, link only the encoding SA to the SC */
	if (txsc->encoding_sa != sa_num)
		return 0;

	mutex_lock(&mbox->lock);

	map_req = otx2_mbox_alloc_msg_mcs_tx_sc_sa_map_write(mbox);
	if (!map_req) {
		otx2_mbox_reset(&mbox->mbox, 0);
		ret = -ENOMEM;
		goto fail;
	}

	map_req->sa_index0 = txsc->hw_sa_id[sa_num];
	map_req->sa_index0_vld = sa_active;
	map_req->sectag_sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
	map_req->sc_id = txsc->hw_sc_id;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_tx_sa_plcy(struct otx2_nic *pfvf,
				      struct macsec_secy *secy,
				      struct cn10k_mcs_txsc *txsc,
				      u8 assoc_num)
{
	unsigned char *src = txsc->sa_key[assoc_num];
	struct mcs_sa_plcy_write_req *plcy_req;
	struct mbox *mbox = &pfvf->mbox;
	u8 reg, key_len;
	int ret;

	mutex_lock(&mbox->lock);

	plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
	if (!plcy_req) {
		ret = -ENOMEM;
		goto fail;
	}

	for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
		memcpy((u8 *)&plcy_req->plcy[0][reg], (src + reg * 8), 8);
		reg++;
	}

	plcy_req->plcy[0][8] = assoc_num;
	plcy_req->sa_index[0] = txsc->hw_sa_id[assoc_num];
	plcy_req->sa_cnt = 1;
	plcy_req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_write_tx_sa_pn(struct otx2_nic *pfvf,
				struct cn10k_mcs_txsc *txsc,
				u8 assoc_num, u64 next_pn)
{
	struct mcs_pn_table_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->pn_id = txsc->hw_sa_id[assoc_num];
	req->next_pn = next_pn;
	req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id,
				    bool enable, enum mcs_direction dir)
{
	struct mcs_flowid_ena_dis_entry *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->flow_id = hw_flow_id;
	req->ena = enable;
	req->dir = dir;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_sa_stats(struct otx2_nic *pfvf, u8 hw_sa_id,
			      struct mcs_sa_stats *rsp_p,
			      enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_stats_req *req;
	struct mcs_sa_stats *rsp;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_sa_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_sa_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_sa_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SA;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_sa_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						       0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_sc_stats(struct otx2_nic *pfvf, u8 hw_sc_id,
			      struct mcs_sc_stats *rsp_p,
			      enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_stats_req *req;
	struct mcs_sc_stats *rsp;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_sc_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_sc_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_sc_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SC;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_sc_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						       0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_secy_stats(struct otx2_nic *pfvf, u8 hw_secy_id,
				struct mcs_secy_stats *rsp_p,
				enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_secy_stats *rsp;
	struct mcs_stats_req *req;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_secy_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_secy_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_secy_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SECY;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_secy_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
							 0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

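/* Allocate the hardware resources backing one SecY: a TX TCAM flow id,
 * a TX SecY, the paired RX SecY and a TX SC. Partial allocations are
 * unwound on any failure.
 */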
static struct cn10k_mcs_txsc *cn10k_mcs_create_txsc(struct otx2_nic *pfvf)
{
	struct cn10k_mcs_txsc *txsc;
	int ret;

	txsc = kzalloc(sizeof(*txsc), GFP_KERNEL);
	if (!txsc)
		return ERR_PTR(-ENOMEM);

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
				   &txsc->hw_flow_id);
	if (ret)
		goto fail;

	/* For each SecY, one TX SecY and one RX SecY HW resource are needed */
	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
				   &txsc->hw_secy_id_tx);
	if (ret)
		goto free_flowid;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
				   &txsc->hw_secy_id_rx);
	if (ret)
		goto free_tx_secy;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
				   &txsc->hw_sc_id);
	if (ret)
		goto free_rx_secy;

	return txsc;
free_rx_secy:
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_rx, false);
free_tx_secy:
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_tx, false);
free_flowid:
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
			    txsc->hw_flow_id, false);
fail:
	kfree(txsc);
	return ERR_PTR(ret);
}

/* Free the Tx SC and its SA resources (if any) back to the AF */
static void cn10k_mcs_delete_txsc(struct otx2_nic *pfvf,
				  struct cn10k_mcs_txsc *txsc)
{
	u8 sa_bmap = txsc->sa_bmap;
	u8 sa_num = 0;

	while (sa_bmap) {
		if (sa_bmap & 1) {
			cn10k_mcs_write_tx_sa_plcy(pfvf, txsc->sw_secy,
						   txsc, sa_num);
			cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
		}
		sa_num++;
		sa_bmap >>= 1;
	}

	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
			    txsc->hw_sc_id, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_rx, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_tx, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
			    txsc->hw_flow_id, false);
}

static struct cn10k_mcs_rxsc *cn10k_mcs_create_rxsc(struct otx2_nic *pfvf)
{
	struct cn10k_mcs_rxsc *rxsc;
	int ret;

	rxsc = kzalloc(sizeof(*rxsc), GFP_KERNEL);
	if (!rxsc)
		return ERR_PTR(-ENOMEM);

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
				   &rxsc->hw_flow_id);
	if (ret)
		goto fail;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
				   &rxsc->hw_sc_id);
	if (ret)
		goto free_flowid;

	return rxsc;
free_flowid:
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
			    rxsc->hw_flow_id, false);
fail:
	kfree(rxsc);
	return ERR_PTR(ret);
}

/* Free the Rx SC and its SA resources (if any) back to the AF */
static void cn10k_mcs_delete_rxsc(struct otx2_nic *pfvf,
				  struct cn10k_mcs_rxsc *rxsc)
{
	u8 sa_bmap = rxsc->sa_bmap;
	u8 sa_num = 0;

	while (sa_bmap) {
		if (sa_bmap & 1) {
			cn10k_mcs_write_rx_sa_plcy(pfvf, rxsc->sw_secy, rxsc,
						   sa_num, false);
			cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
		}
		sa_num++;
		sa_bmap >>= 1;
	}

	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
			    rxsc->hw_sc_id, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
			    rxsc->hw_flow_id, false);
}

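/* (Re)program the TX side of a SecY: the given SA's key, next PN and
 * SA-to-SC link (if a software SA is supplied), then the TX SecY
 * policy, the TX flow id and the paired RX SecY policy.
 */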
static int cn10k_mcs_secy_tx_cfg(struct otx2_nic *pfvf, struct macsec_secy *secy,
				 struct cn10k_mcs_txsc *txsc,
				 struct macsec_tx_sa *sw_tx_sa, u8 sa_num)
{
	if (sw_tx_sa) {
		cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
		cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
				     sw_tx_sa->next_pn_halves.lower);
		cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num,
					sw_tx_sa->active);
	}

	cn10k_mcs_write_tx_secy(pfvf, secy, txsc);
	cn10k_mcs_write_tx_flowid(pfvf, secy, txsc);
	/* When updating the SecY, update the RX SecY policy as well */
	cn10k_mcs_write_rx_secy(pfvf, secy, txsc->hw_secy_id_rx);

	return 0;
}

static int cn10k_mcs_secy_rx_cfg(struct otx2_nic *pfvf,
				 struct macsec_secy *secy, u8 hw_secy_id)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *mcs_rx_sc;
	struct macsec_rx_sc *sw_rx_sc;
	struct macsec_rx_sa *sw_rx_sa;
	u8 sa_num;

	for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
	     sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
		mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
		if (unlikely(!mcs_rx_sc))
			continue;

		for (sa_num = 0; sa_num < CN10K_MCS_SA_PER_SC; sa_num++) {
			sw_rx_sa = rcu_dereference_bh(sw_rx_sc->sa[sa_num]);
			if (!sw_rx_sa)
				continue;

			cn10k_mcs_write_rx_sa_plcy(pfvf, secy, mcs_rx_sc,
						   sa_num, sw_rx_sa->active);
			cn10k_mcs_write_rx_sa_pn(pfvf, mcs_rx_sc, sa_num,
						 sw_rx_sa->next_pn_halves.lower);
		}

		cn10k_mcs_write_rx_flowid(pfvf, mcs_rx_sc, hw_secy_id);
		cn10k_mcs_write_sc_cam(pfvf, mcs_rx_sc, hw_secy_id);
	}

	return 0;
}

static int cn10k_mcs_disable_rxscs(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   bool delete)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *mcs_rx_sc;
	struct macsec_rx_sc *sw_rx_sc;
	int ret;

	for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
	     sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
		mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
		if (unlikely(!mcs_rx_sc))
			continue;

		ret = cn10k_mcs_ena_dis_flowid(pfvf, mcs_rx_sc->hw_flow_id,
					       false, MCS_RX);
		if (ret)
			dev_err(pfvf->dev, "Failed to disable TCAM for SC %d\n",
				mcs_rx_sc->hw_sc_id);
		if (delete) {
			cn10k_mcs_delete_rxsc(pfvf, mcs_rx_sc);
			list_del(&mcs_rx_sc->entry);
			kfree(mcs_rx_sc);
		}
	}

	return 0;
}

static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy,
				 struct cn10k_mcs_txsc *txsc)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_secy_stats rx_rsp = { 0 };
	struct mcs_sc_stats sc_rsp = { 0 };
	struct cn10k_mcs_rxsc *rxsc;

	/* Some of the stats below share hardware counters. When the SecY
	 * policy is updated, take a snapshot of the current stats and
	 * clear the hardware counters, so the shared counts are
	 * attributed to the right stat.
	 */

	/* Check if sync is really needed */
	if (secy->validate_frames == txsc->last_validate_frames &&
	    secy->protect_frames == txsc->last_protect_frames)
		return;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);

	txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
	txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
	txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
	if (txsc->last_validate_frames == MACSEC_VALIDATE_STRICT)
		txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
	else
		txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;

	list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
		cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &sc_rsp, MCS_RX, true);

		rxsc->stats.InOctetsValidated += sc_rsp.octet_validate_cnt;
		rxsc->stats.InOctetsDecrypted += sc_rsp.octet_decrypt_cnt;

		rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt;
		rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt;

		if (txsc->last_protect_frames)
			rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt;
		else
			rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt;

		if (txsc->last_validate_frames == MACSEC_VALIDATE_CHECK)
			rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt;
		else
			rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt;
	}

	txsc->last_validate_frames = secy->validate_frames;
	txsc->last_protect_frames = secy->protect_frames;
}

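/* The cn10k_mdo_* callbacks below implement struct macsec_ops and are
 * invoked by the core MACsec offload path. cn10k_mdo_open() replays
 * the SecY's TX and RX configuration into the hardware when the
 * macsec interface is brought up.
 */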
static int cn10k_mdo_open(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct macsec_tx_sa *sw_tx_sa;
	struct cn10k_mcs_txsc *txsc;
	u8 sa_num;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	sa_num = txsc->encoding_sa;
	sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);

	err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
	if (err)
		return err;

	return cn10k_mcs_secy_rx_cfg(pfvf, secy, txsc->hw_secy_id_rx);
}

static int cn10k_mdo_stop(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	err = cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
	if (err)
		return err;

	return cn10k_mcs_disable_rxscs(pfvf, ctx->secy, false);
}

static int cn10k_mdo_add_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_txsc *txsc;

	if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
		return -EOPNOTSUPP;

	/* Stick to a 16-byte key length until XPN support is added */
	if (secy->key_len != 16)
		return -EOPNOTSUPP;

	if (secy->xpn)
		return -EOPNOTSUPP;

	txsc = cn10k_mcs_create_txsc(pfvf);
	if (IS_ERR(txsc))
		return -ENOSPC;

	txsc->sw_secy = secy;
	txsc->encoding_sa = secy->tx_sc.encoding_sa;
	txsc->last_validate_frames = secy->validate_frames;
	txsc->last_protect_frames = secy->protect_frames;

	list_add(&txsc->entry, &cfg->txsc_list);

	if (netif_running(secy->netdev))
		return cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);

	return 0;
}

static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct macsec_tx_sa *sw_tx_sa;
	struct cn10k_mcs_txsc *txsc;
	u8 sa_num;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	txsc->encoding_sa = secy->tx_sc.encoding_sa;

	sa_num = txsc->encoding_sa;
	sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);

	if (netif_running(secy->netdev)) {
		cn10k_mcs_sync_stats(pfvf, secy, txsc);

		err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
	cn10k_mcs_disable_rxscs(pfvf, ctx->secy, true);
	cn10k_mcs_delete_txsc(pfvf, txsc);
	list_del(&txsc->entry);
	kfree(txsc);

	return 0;
}

static int cn10k_mdo_add_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (cn10k_mcs_alloc_txsa(pfvf, &txsc->hw_sa_id[sa_num]))
		return -ENOSPC;

	memcpy(&txsc->sa_key[sa_num], ctx->sa.key, secy->key_len);
	txsc->sa_bmap |= 1 << sa_num;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
		if (err)
			return err;

		err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
					   sw_tx_sa->next_pn_halves.lower);
		if (err)
			return err;

		err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
					      sa_num, sw_tx_sa->active);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (netif_running(secy->netdev)) {
		/* Keys cannot be changed after creation */
		err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
					   sw_tx_sa->next_pn_halves.lower);
		if (err)
			return err;

		err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
					      sa_num, sw_tx_sa->active);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
	txsc->sa_bmap &= ~(1 << sa_num);

	return 0;
}

static int cn10k_mdo_add_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_rxsc *rxsc;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	rxsc = cn10k_mcs_create_rxsc(pfvf);
	if (IS_ERR(rxsc))
		return -ENOSPC;

	rxsc->sw_secy = ctx->secy;
	rxsc->sw_rxsc = ctx->rx_sc;
	list_add(&rxsc->entry, &cfg->rxsc_list);

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_flowid(pfvf, rxsc, txsc->hw_secy_id_rx);
		if (err)
			return err;

		err = cn10k_mcs_write_sc_cam(pfvf, rxsc, txsc->hw_secy_id_rx);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	bool enable = ctx->rx_sc->active;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (netif_running(secy->netdev))
		return cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id,
						enable, MCS_RX);

	return 0;
}

static int cn10k_mdo_del_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id, false, MCS_RX);
	cn10k_mcs_delete_rxsc(pfvf, rxsc);
	list_del(&rxsc->entry);
	kfree(rxsc);

	return 0;
}

static int cn10k_mdo_add_rxsa(struct macsec_context *ctx)
{
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
	u64 next_pn = rx_sa->next_pn_halves.lower;
	struct macsec_secy *secy = ctx->secy;
	bool sa_in_use = rx_sa->active;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;
	int err;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (cn10k_mcs_alloc_rxsa(pfvf, &rxsc->hw_sa_id[sa_num]))
		return -ENOSPC;

	memcpy(&rxsc->sa_key[sa_num], ctx->sa.key, ctx->secy->key_len);
	rxsc->sa_bmap |= 1 << sa_num;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc,
						 sa_num, sa_in_use);
		if (err)
			return err;

		err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx)
{
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
	u64 next_pn = rx_sa->next_pn_halves.lower;
	struct macsec_secy *secy = ctx->secy;
	bool sa_in_use = rx_sa->active;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;
	int err;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc, sa_num, sa_in_use);
		if (err)
			return err;

		err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_rxsa(struct macsec_context *ctx)
{
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_write_rx_sa_plcy(pfvf, ctx->secy, rxsc, sa_num, false);
	cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);

	rxsc->sa_bmap &= ~(1 << sa_num);

	return 0;
}

static int cn10k_mdo_get_dev_stats(struct macsec_context *ctx)
{
	struct mcs_secy_stats tx_rsp = { 0 }, rx_rsp = { 0 };
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_tx, &tx_rsp, MCS_TX, false);
	ctx->stats.dev_stats->OutPktsUntagged = tx_rsp.pkt_untagged_cnt;
	ctx->stats.dev_stats->OutPktsTooLong = tx_rsp.pkt_toolong_cnt;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
	txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
	txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
	txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
	if (secy->validate_frames == MACSEC_VALIDATE_STRICT)
		txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
	else
		txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
	txsc->stats.InPktsOverrun = 0;

	ctx->stats.dev_stats->InPktsNoTag = txsc->stats.InPktsNoTag;
	ctx->stats.dev_stats->InPktsUntagged = txsc->stats.InPktsUntagged;
	ctx->stats.dev_stats->InPktsBadTag = txsc->stats.InPktsBadTag;
	ctx->stats.dev_stats->InPktsUnknownSCI = txsc->stats.InPktsUnknownSCI;
	ctx->stats.dev_stats->InPktsNoSCI = txsc->stats.InPktsNoSCI;
	ctx->stats.dev_stats->InPktsOverrun = txsc->stats.InPktsOverrun;

	return 0;
}

static int cn10k_mdo_get_tx_sc_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sc_stats rsp = { 0 };
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_sc_stats(pfvf, txsc->hw_sc_id, &rsp, MCS_TX, false);

	ctx->stats.tx_sc_stats->OutPktsProtected = rsp.pkt_protected_cnt;
	ctx->stats.tx_sc_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
	ctx->stats.tx_sc_stats->OutOctetsProtected = rsp.octet_protected_cnt;
	ctx->stats.tx_sc_stats->OutOctetsEncrypted = rsp.octet_encrypt_cnt;

	return 0;
}

static int cn10k_mdo_get_tx_sa_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sa_stats rsp = { 0 };
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_sa_stats(pfvf, txsc->hw_sa_id[sa_num], &rsp, MCS_TX, false);

	ctx->stats.tx_sa_stats->OutPktsProtected = rsp.pkt_protected_cnt;
	ctx->stats.tx_sa_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;

	return 0;
}

static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct mcs_sc_stats rsp = { 0 };
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &rsp, MCS_RX, true);

	rxsc->stats.InOctetsValidated += rsp.octet_validate_cnt;
	rxsc->stats.InOctetsDecrypted += rsp.octet_decrypt_cnt;

	rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt;
	rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt;

	if (secy->protect_frames)
		rxsc->stats.InPktsLate += rsp.pkt_late_cnt;
	else
		rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt;

	if (secy->validate_frames == MACSEC_VALIDATE_CHECK)
		rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt;
	else
		rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt;

	ctx->stats.rx_sc_stats->InOctetsValidated = rxsc->stats.InOctetsValidated;
	ctx->stats.rx_sc_stats->InOctetsDecrypted = rxsc->stats.InOctetsDecrypted;
	ctx->stats.rx_sc_stats->InPktsInvalid = rxsc->stats.InPktsInvalid;
	ctx->stats.rx_sc_stats->InPktsNotValid = rxsc->stats.InPktsNotValid;
	ctx->stats.rx_sc_stats->InPktsLate = rxsc->stats.InPktsLate;
	ctx->stats.rx_sc_stats->InPktsDelayed = rxsc->stats.InPktsDelayed;
	ctx->stats.rx_sc_stats->InPktsUnchecked = rxsc->stats.InPktsUnchecked;
	ctx->stats.rx_sc_stats->InPktsOK = rxsc->stats.InPktsOK;

	return 0;
}

static int cn10k_mdo_get_rx_sa_stats(struct macsec_context *ctx)
{
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sa_stats rsp = { 0 };
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_sa_stats(pfvf, rxsc->hw_sa_id[sa_num], &rsp, MCS_RX, false);

	ctx->stats.rx_sa_stats->InPktsOK = rsp.pkt_ok_cnt;
	ctx->stats.rx_sa_stats->InPktsInvalid = rsp.pkt_invalid_cnt;
	ctx->stats.rx_sa_stats->InPktsNotValid = rsp.pkt_notvalid_cnt;
	ctx->stats.rx_sa_stats->InPktsNotUsingSA = rsp.pkt_nosaerror_cnt;
	ctx->stats.rx_sa_stats->InPktsUnusedSA = rsp.pkt_nosa_cnt;

	return 0;
}

static const struct macsec_ops cn10k_mcs_ops = {
	.mdo_dev_open = cn10k_mdo_open,
	.mdo_dev_stop = cn10k_mdo_stop,
	.mdo_add_secy = cn10k_mdo_add_secy,
	.mdo_upd_secy = cn10k_mdo_upd_secy,
	.mdo_del_secy = cn10k_mdo_del_secy,
	.mdo_add_rxsc = cn10k_mdo_add_rxsc,
	.mdo_upd_rxsc = cn10k_mdo_upd_rxsc,
	.mdo_del_rxsc = cn10k_mdo_del_rxsc,
	.mdo_add_rxsa = cn10k_mdo_add_rxsa,
	.mdo_upd_rxsa = cn10k_mdo_upd_rxsa,
	.mdo_del_rxsa = cn10k_mdo_del_rxsa,
	.mdo_add_txsa = cn10k_mdo_add_txsa,
	.mdo_upd_txsa = cn10k_mdo_upd_txsa,
	.mdo_del_txsa = cn10k_mdo_del_txsa,
	.mdo_get_dev_stats = cn10k_mdo_get_dev_stats,
	.mdo_get_tx_sc_stats = cn10k_mdo_get_tx_sc_stats,
	.mdo_get_tx_sa_stats = cn10k_mdo_get_tx_sa_stats,
	.mdo_get_rx_sc_stats = cn10k_mdo_get_rx_sc_stats,
	.mdo_get_rx_sa_stats = cn10k_mdo_get_rx_sa_stats,
};

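/* Handle the PN-wrap interrupt (MCS_CPM_TX_PACKET_XPN_EQ0_INT): find
 * the SecY owning the expired hardware TX SA and notify the core via
 * macsec_pn_wrapped() so it can handle the rollover.
 */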
void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_tx_sa *sw_tx_sa = NULL;
	struct macsec_secy *secy = NULL;
	struct cn10k_mcs_txsc *txsc;
	u8 an;

	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return;

	if (!(event->intr_mask & MCS_CPM_TX_PACKET_XPN_EQ0_INT))
		return;

	/* Find the SecY to which the expired hardware SA is mapped */
	list_for_each_entry(txsc, &cfg->txsc_list, entry) {
		for (an = 0; an < CN10K_MCS_SA_PER_SC; an++)
			if (txsc->hw_sa_id[an] == event->sa_id) {
				secy = txsc->sw_secy;
				sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]);
			}
	}

	if (secy && sw_tx_sa)
		macsec_pn_wrapped(secy, sw_tx_sa);
}

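/* Enable MACsec offload when the silicon supports it: set up the
 * driver lists, advertise NETIF_F_HW_MACSEC, register macsec_ops and
 * subscribe to the PN-wrap interrupt. An interrupt setup failure is
 * not fatal; only the wrap notification is lost.
 */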
int cn10k_mcs_init(struct otx2_nic *pfvf)
{
	struct mbox *mbox = &pfvf->mbox;
	struct cn10k_mcs_cfg *cfg;
	struct mcs_intr_cfg *req;

	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return 0;

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&cfg->txsc_list);
	INIT_LIST_HEAD(&cfg->rxsc_list);
	pfvf->macsec_cfg = cfg;

	pfvf->netdev->features |= NETIF_F_HW_MACSEC;
	pfvf->netdev->macsec_ops = &cn10k_mcs_ops;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_intr_cfg(mbox);
	if (!req)
		goto fail;

	req->intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;

	if (otx2_sync_mbox_msg(mbox))
		goto fail;

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	dev_err(pfvf->dev, "Cannot notify PN wrapped event\n");
	mutex_unlock(&mbox->lock);
	return 0;
}

void cn10k_mcs_free(struct otx2_nic *pfvf)
{
	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return;

	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, 0, true);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, 0, true);
	kfree(pfvf->macsec_cfg);
	pfvf->macsec_cfg = NULL;
}