diff --git a/go.sum b/go.sum index 4263840b87b51..474d895800f1c 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,5 @@ +4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs= +4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -15,28 +17,124 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= +cloud.google.com/go/accessapproval v1.7.5/go.mod h1:g88i1ok5dvQ9XJsxpUInWWvUBrIZhyPDPbk4T01OoJ0= +cloud.google.com/go/accesscontextmanager v1.8.5/go.mod h1:TInEhcZ7V9jptGNqN3EzZ5XMhT6ijWxTGjzyETwmL0Q= +cloud.google.com/go/aiplatform v1.60.0/go.mod h1:eTlGuHOahHprZw3Hio5VKmtThIOak5/qy6pzdsqcQnM= +cloud.google.com/go/analytics v0.23.0/go.mod h1:YPd7Bvik3WS95KBok2gPXDqQPHy08TsCQG6CdUCb+u0= +cloud.google.com/go/apigateway v1.6.5/go.mod h1:6wCwvYRckRQogyDDltpANi3zsCDl6kWi0b4Je+w2UiI= +cloud.google.com/go/apigeeconnect v1.6.5/go.mod h1:MEKm3AiT7s11PqTfKE3KZluZA9O91FNysvd3E6SJ6Ow= +cloud.google.com/go/apigeeregistry v0.8.3/go.mod h1:aInOWnqF4yMQx8kTjDqHNXjZGh/mxeNlAf52YqtASUs= +cloud.google.com/go/appengine v1.8.5/go.mod h1:uHBgNoGLTS5di7BvU25NFDuKa82v0qQLjyMJLuPQrVo= +cloud.google.com/go/area120 v0.8.5/go.mod h1:BcoFCbDLZjsfe4EkCnEq1LKvHSK0Ew/zk5UFu6GMyA0= +cloud.google.com/go/artifactregistry v1.14.7/go.mod h1:0AUKhzWQzfmeTvT4SjfI4zjot72EMfrkvL9g9aRjnnM= +cloud.google.com/go/asset v1.17.2/go.mod h1:SVbzde67ehddSoKf5uebOD1sYw8Ab/jD/9EIeWg99q4= +cloud.google.com/go/assuredworkloads 
v1.11.5/go.mod h1:FKJ3g3ZvkL2D7qtqIGnDufFkHxwIpNM9vtmhvt+6wqk= +cloud.google.com/go/automl v1.13.5/go.mod h1:MDw3vLem3yh+SvmSgeYUmUKqyls6NzSumDm9OJ3xJ1Y= +cloud.google.com/go/baremetalsolution v1.2.4/go.mod h1:BHCmxgpevw9IEryE99HbYEfxXkAEA3hkMJbYYsHtIuY= +cloud.google.com/go/batch v1.8.0/go.mod h1:k8V7f6VE2Suc0zUM4WtoibNrA6D3dqBpB+++e3vSGYc= +cloud.google.com/go/beyondcorp v1.0.4/go.mod h1:Gx8/Rk2MxrvWfn4WIhHIG1NV7IBfg14pTKv1+EArVcc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.59.1/go.mod h1:VP1UJYgevyTwsV7desjzNzDND5p6hZB+Z8gZJN1GQUc= +cloud.google.com/go/billing v1.18.2/go.mod h1:PPIwVsOOQ7xzbADCwNe8nvK776QpfrOAUkvKjCUcpSE= +cloud.google.com/go/binaryauthorization v1.8.1/go.mod h1:1HVRyBerREA/nhI7yLang4Zn7vfNVA3okoAR9qYQJAQ= +cloud.google.com/go/certificatemanager v1.7.5/go.mod h1:uX+v7kWqy0Y3NG/ZhNvffh0kuqkKZIXdvlZRO7z0VtM= +cloud.google.com/go/channel v1.17.5/go.mod h1:FlpaOSINDAXgEext0KMaBq/vwpLMkkPAw9b2mApQeHc= +cloud.google.com/go/cloudbuild v1.15.1/go.mod h1:gIofXZSu+XD2Uy+qkOrGKEx45zd7s28u/k8f99qKals= +cloud.google.com/go/clouddms v1.7.4/go.mod h1:RdrVqoFG9RWI5AvZ81SxJ/xvxPdtcRhFotwdE79DieY= +cloud.google.com/go/cloudtasks v1.12.6/go.mod h1:b7c7fe4+TJsFZfDyzO51F7cjq7HLUlRi/KZQLQjDsaY= cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= cloud.google.com/go/compute/metadata v0.2.3 
h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.13.0/go.mod h1:ieq5d5EtHsu8vhe2y3amtZ+BE+AQwX5qAy7cpo0POsI= +cloud.google.com/go/container v1.31.0/go.mod h1:7yABn5s3Iv3lmw7oMmyGbeV6tQj86njcTijkkGuvdZA= +cloud.google.com/go/containeranalysis v0.11.4/go.mod h1:cVZT7rXYBS9NG1rhQbWL9pWbXCKHWJPYraE8/FTSYPE= +cloud.google.com/go/datacatalog v1.19.3/go.mod h1:ra8V3UAsciBpJKQ+z9Whkxzxv7jmQg1hfODr3N3YPJ4= +cloud.google.com/go/dataflow v0.9.5/go.mod h1:udl6oi8pfUHnL0z6UN9Lf9chGqzDMVqcYTcZ1aPnCZQ= +cloud.google.com/go/dataform v0.9.2/go.mod h1:S8cQUwPNWXo7m/g3DhWHsLBoufRNn9EgFrMgne2j7cI= +cloud.google.com/go/datafusion v1.7.5/go.mod h1:bYH53Oa5UiqahfbNK9YuYKteeD4RbQSNMx7JF7peGHc= +cloud.google.com/go/datalabeling v0.8.5/go.mod h1:IABB2lxQnkdUbMnQaOl2prCOfms20mcPxDBm36lps+s= +cloud.google.com/go/dataplex v1.14.2/go.mod h1:0oGOSFlEKef1cQeAHXy4GZPB/Ife0fz/PxBf+ZymA2U= +cloud.google.com/go/dataproc/v2 v2.4.0/go.mod h1:3B1Ht2aRB8VZIteGxQS/iNSJGzt9+CA0WGnDVMEm7Z4= +cloud.google.com/go/dataqna v0.8.5/go.mod h1:vgihg1mz6n7pb5q2YJF7KlXve6tCglInd6XO0JGOlWM= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.15.0/go.mod h1:GAeStMBIt9bPS7jMJA85kgkpsMkvseWWXiaHya9Jes8= +cloud.google.com/go/datastream v1.10.4/go.mod h1:7kRxPdxZxhPg3MFeCSulmAJnil8NJGGvSNdn4p1sRZo= +cloud.google.com/go/deploy v1.17.1/go.mod h1:SXQyfsXrk0fBmgBHRzBjQbZhMfKZ3hMQBw5ym7MN/50= +cloud.google.com/go/dialogflow v1.49.0/go.mod h1:dhVrXKETtdPlpPhE7+2/k4Z8FRNUp6kMV3EW3oz/fe0= +cloud.google.com/go/dlp v1.11.2/go.mod h1:9Czi+8Y/FegpWzgSfkRlyz+jwW6Te9Rv26P3UfU/h/w= +cloud.google.com/go/documentai v1.25.0/go.mod h1:ftLnzw5VcXkLItp6pw1mFic91tMRyfv6hHEY5br4KzY= +cloud.google.com/go/domains v0.9.5/go.mod 
h1:dBzlxgepazdFhvG7u23XMhmMKBjrkoUNaw0A8AQB55Y= +cloud.google.com/go/edgecontainer v1.1.5/go.mod h1:rgcjrba3DEDEQAidT4yuzaKWTbkTI5zAMu3yy6ZWS0M= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.6.6/go.mod h1:XbqHJGaiH0v2UvtuucfOzFXN+rpL/aU5BCZLn4DYl1Q= +cloud.google.com/go/eventarc v1.13.4/go.mod h1:zV5sFVoAa9orc/52Q+OuYUG9xL2IIZTbbuTHC6JSY8s= +cloud.google.com/go/filestore v1.8.1/go.mod h1:MbN9KcaM47DRTIuLfQhJEsjaocVebNtNQhSLhKCF5GM= +cloud.google.com/go/firestore v1.14.0/go.mod h1:96MVaHLsEhbvkBEdZgfN+AS/GIkco1LRpH9Xp9YZfzQ= +cloud.google.com/go/functions v1.16.0/go.mod h1:nbNpfAG7SG7Duw/o1iZ6ohvL7mc6MapWQVpqtM29n8k= +cloud.google.com/go/gkebackup v1.3.5/go.mod h1:KJ77KkNN7Wm1LdMopOelV6OodM01pMuK2/5Zt1t4Tvc= +cloud.google.com/go/gkeconnect v0.8.5/go.mod h1:LC/rS7+CuJ5fgIbXv8tCD/mdfnlAadTaUufgOkmijuk= +cloud.google.com/go/gkehub v0.14.5/go.mod h1:6bzqxM+a+vEH/h8W8ec4OJl4r36laxTs3A/fMNHJ0wA= +cloud.google.com/go/gkemulticloud v1.1.1/go.mod h1:C+a4vcHlWeEIf45IB5FFR5XGjTeYhF83+AYIpTy4i2Q= +cloud.google.com/go/gsuiteaddons v1.6.5/go.mod h1:Lo4P2IvO8uZ9W+RaC6s1JVxo42vgy+TX5a6hfBZ0ubs= cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc= cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI= +cloud.google.com/go/iap v1.9.4/go.mod h1:vO4mSq0xNf/Pu6E5paORLASBwEmphXEjgCFg7aeNu1w= +cloud.google.com/go/ids v1.4.5/go.mod h1:p0ZnyzjMWxww6d2DvMGnFwCsSxDJM666Iir1bK1UuBo= +cloud.google.com/go/iot v1.7.5/go.mod h1:nq3/sqTz3HGaWJi1xNiX7F41ThOzpud67vwk0YsSsqs= cloud.google.com/go/kms v1.15.7 h1:7caV9K3yIxvlQPAcaFffhlT7d1qpxjB1wHBtjWa13SM= cloud.google.com/go/kms v1.15.7/go.mod h1:ub54lbsa6tDkUwnu4W7Yt1aAIFLnspgh0kPGToDukeI= +cloud.google.com/go/language v1.12.3/go.mod h1:evFX9wECX6mksEva8RbRnr/4wi/vKGYnAJrTRXU8+f8= +cloud.google.com/go/lifesciences v0.9.5/go.mod h1:OdBm0n7C0Osh5yZB7j9BXyrMnTRGBJIZonUMxo5CzPw= +cloud.google.com/go/logging 
v1.9.0/go.mod h1:1Io0vnZv4onoUnsVUQY3HZ3Igb1nBchky0A0y7BBBhE= +cloud.google.com/go/longrunning v0.5.5/go.mod h1:WV2LAxD8/rg5Z1cNW6FJ/ZpX4E4VnDnoTk0yawPBB7s= +cloud.google.com/go/managedidentities v1.6.5/go.mod h1:fkFI2PwwyRQbjLxlm5bQ8SjtObFMW3ChBGNqaMcgZjI= +cloud.google.com/go/maps v1.6.4/go.mod h1:rhjqRy8NWmDJ53saCfsXQ0LKwBHfi6OSh5wkq6BaMhI= +cloud.google.com/go/mediatranslation v0.8.5/go.mod h1:y7kTHYIPCIfgyLbKncgqouXJtLsU+26hZhHEEy80fSs= +cloud.google.com/go/memcache v1.10.5/go.mod h1:/FcblbNd0FdMsx4natdj+2GWzTq+cjZvMa1I+9QsuMA= +cloud.google.com/go/metastore v1.13.4/go.mod h1:FMv9bvPInEfX9Ac1cVcRXp8EBBQnBcqH6gz3KvJ9BAE= +cloud.google.com/go/monitoring v1.18.0/go.mod h1:c92vVBCeq/OB4Ioyo+NbN2U7tlg5ZH41PZcdvfc+Lcg= +cloud.google.com/go/networkconnectivity v1.14.4/go.mod h1:PU12q++/IMnDJAB+3r+tJtuCXCfwfN+C6Niyj6ji1Po= +cloud.google.com/go/networkmanagement v1.9.4/go.mod h1:daWJAl0KTFytFL7ar33I6R/oNBH8eEOX/rBNHrC/8TA= +cloud.google.com/go/networksecurity v0.9.5/go.mod h1:KNkjH/RsylSGyyZ8wXpue8xpCEK+bTtvof8SBfIhMG8= +cloud.google.com/go/notebooks v1.11.3/go.mod h1:0wQyI2dQC3AZyQqWnRsp+yA+kY4gC7ZIVP4Qg3AQcgo= +cloud.google.com/go/optimization v1.6.3/go.mod h1:8ve3svp3W6NFcAEFr4SfJxrldzhUl4VMUJmhrqVKtYA= +cloud.google.com/go/orchestration v1.8.5/go.mod h1:C1J7HesE96Ba8/hZ71ISTV2UAat0bwN+pi85ky38Yq8= +cloud.google.com/go/orgpolicy v1.12.1/go.mod h1:aibX78RDl5pcK3jA8ysDQCFkVxLj3aOQqrbBaUL2V5I= +cloud.google.com/go/osconfig v1.12.5/go.mod h1:D9QFdxzfjgw3h/+ZaAb5NypM8bhOMqBzgmbhzWViiW8= +cloud.google.com/go/oslogin v1.13.1/go.mod h1:vS8Sr/jR7QvPWpCjNqy6LYZr5Zs1e8ZGW/KPn9gmhws= +cloud.google.com/go/phishingprotection v0.8.5/go.mod h1:g1smd68F7mF1hgQPuYn3z8HDbNre8L6Z0b7XMYFmX7I= +cloud.google.com/go/policytroubleshooter v1.10.3/go.mod h1:+ZqG3agHT7WPb4EBIRqUv4OyIwRTZvsVDHZ8GlZaoxk= +cloud.google.com/go/privatecatalog v0.9.5/go.mod h1:fVWeBOVe7uj2n3kWRGlUQqR/pOd450J9yZoOECcQqJk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= 
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/pubsub v1.36.1 h1:dfEPuGCHGbWUhaMCTHUFjfroILEkx55iUmKBZTP5f+Y= cloud.google.com/go/pubsub v1.36.1/go.mod h1:iYjCa9EzWOoBiTdd4ps7QoMtMln5NwaZQpK1hbRfBDE= +cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= +cloud.google.com/go/recaptchaenterprise/v2 v2.9.2/go.mod h1:trwwGkfhCmp05Ll5MSJPXY7yvnO0p4v3orGANAFHAuU= +cloud.google.com/go/recommendationengine v0.8.5/go.mod h1:A38rIXHGFvoPvmy6pZLozr0g59NRNREz4cx7F58HAsQ= +cloud.google.com/go/recommender v1.12.1/go.mod h1:gf95SInWNND5aPas3yjwl0I572dtudMhMIG4ni8nr+0= +cloud.google.com/go/redis v1.14.2/go.mod h1:g0Lu7RRRz46ENdFKQ2EcQZBAJ2PtJHJLuiiRuEXwyQw= +cloud.google.com/go/resourcemanager v1.9.5/go.mod h1:hep6KjelHA+ToEjOfO3garMKi/CLYwTqeAw7YiEI9x8= +cloud.google.com/go/resourcesettings v1.6.5/go.mod h1:WBOIWZraXZOGAgoR4ukNj0o0HiSMO62H9RpFi9WjP9I= +cloud.google.com/go/retail v1.16.0/go.mod h1:LW7tllVveZo4ReWt68VnldZFWJRzsh9np+01J9dYWzE= +cloud.google.com/go/run v1.3.4/go.mod h1:FGieuZvQ3tj1e9GnzXqrMABSuir38AJg5xhiYq+SF3o= +cloud.google.com/go/scheduler v1.10.6/go.mod h1:pe2pNCtJ+R01E06XCDOJs1XvAMbv28ZsQEbqknxGOuE= +cloud.google.com/go/secretmanager v1.11.5/go.mod h1:eAGv+DaCHkeVyQi0BeXgAHOU0RdrMeZIASKc+S7VqH4= +cloud.google.com/go/security v1.15.5/go.mod h1:KS6X2eG3ynWjqcIX976fuToN5juVkF6Ra6c7MPnldtc= +cloud.google.com/go/securitycenter v1.24.4/go.mod h1:PSccin+o1EMYKcFQzz9HMMnZ2r9+7jbc+LvPjXhpwcU= +cloud.google.com/go/servicedirectory v1.11.4/go.mod h1:Bz2T9t+/Ehg6x+Y7Ycq5xiShYLD96NfEsWNHyitj1qM= +cloud.google.com/go/shell v1.7.5/go.mod h1:hL2++7F47/IfpfTO53KYf1EC+F56k3ThfNEXd4zcuiE= +cloud.google.com/go/spanner v1.56.0/go.mod h1:DndqtUKQAt3VLuV2Le+9Y3WTnq5cNKrnLb/Piqcj+h0= 
+cloud.google.com/go/speech v1.21.1/go.mod h1:E5GHZXYQlkqWQwY5xRSLHw2ci5NMQNG52FfMU1aZrIA= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= @@ -44,14 +142,36 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8= cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8= +cloud.google.com/go/storagetransfer v1.10.4/go.mod h1:vef30rZKu5HSEf/x1tK3WfWrL0XVoUQN/EPDRGPzjZs= +cloud.google.com/go/talent v1.6.6/go.mod h1:y/WQDKrhVz12WagoarpAIyKKMeKGKHWPoReZ0g8tseQ= +cloud.google.com/go/texttospeech v1.7.5/go.mod h1:tzpCuNWPwrNJnEa4Pu5taALuZL4QRRLcb+K9pbhXT6M= +cloud.google.com/go/tpu v1.6.5/go.mod h1:P9DFOEBIBhuEcZhXi+wPoVy/cji+0ICFi4TtTkMHSSs= +cloud.google.com/go/trace v1.10.5/go.mod h1:9hjCV1nGBCtXbAE4YK7OqJ8pmPYSxPA0I67JwRd5s3M= +cloud.google.com/go/translate v1.10.1/go.mod h1:adGZcQNom/3ogU65N9UXHOnnSvjPwA/jKQUMnsYXOyk= +cloud.google.com/go/video v1.20.4/go.mod h1:LyUVjyW+Bwj7dh3UJnUGZfyqjEto9DnrvTe1f/+QrW0= +cloud.google.com/go/videointelligence v1.11.5/go.mod h1:/PkeQjpRponmOerPeJxNPuxvi12HlW7Em0lJO14FC3I= +cloud.google.com/go/vision/v2 v2.8.0/go.mod h1:ocqDiA2j97pvgogdyhoxiQp2ZkDCyr0HWpicywGGRhU= +cloud.google.com/go/vmmigration v1.7.5/go.mod h1:pkvO6huVnVWzkFioxSghZxIGcsstDvYiVCxQ9ZH3eYI= +cloud.google.com/go/vmwareengine v1.1.1/go.mod h1:nMpdsIVkUrSaX8UvmnBhzVzG7PPvNYc5BszcvIVudYs= +cloud.google.com/go/vpcaccess v1.7.5/go.mod h1:slc5ZRvvjP78c2dnL7m4l4R9GwL3wDLcpIWz6P/ziig= +cloud.google.com/go/webrisk v1.9.5/go.mod h1:aako0Fzep1Q714cPEM5E+mtYX8/jsfegAuS8aivxy3U= +cloud.google.com/go/websecurityscanner v1.6.5/go.mod 
h1:QR+DWaxAz2pWooylsBF854/Ijvuoa3FCyS1zBa1rAVQ= +cloud.google.com/go/workflows v1.12.4/go.mod h1:yQ7HUqOkdJK4duVtMeBCAOPiN1ZF1E9pAMX51vpwB/w= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/4meepo/tagalign v1.3.3/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE= +github.com/Abirdcfly/dupword v0.0.13/go.mod h1:Ut6Ue2KgF/kCOawpW4LnExT+xZLQviJPE4klBPMK/5Y= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/Antonboom/errname v0.1.12/go.mod h1:bK7todrzvlaZoQagP1orKzWXv59X/x0W0Io2XT1Ssro= +github.com/Antonboom/nilnil v0.1.7/go.mod h1:TP+ScQWVEq0eSIxqU8CbdT5DFWoHp0MbP+KMUO1BKYQ= +github.com/Antonboom/testifylint v1.1.2/go.mod h1:9PFi+vWa8zzl4/B/kqmFJcw85ZUv8ReyBzuQCd30+WI= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.4.0/go.mod h1:uYt4CfhkJA9o0FN7jfE5minm/i4nUE4MjGUJkzB6Zs8= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4Qufg58y+qElGOt5qv0z1mURkRY= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag= 
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= @@ -64,14 +184,18 @@ github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbi github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw= github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w= +github.com/Code-Hex/go-generics-cache v1.3.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0/go.mod h1:Nl76DrGNJTA1KJ0LePKBw/vznBX1EHbAZX8mwjR82nI= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM= +github.com/KimMachineGun/automemlimit v0.5.0/go.mod h1:di3GCKiu9Y+1fs92erCbUvKzPkNyViN3mA0vti/ykEQ= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 
h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= @@ -80,6 +204,8 @@ github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030I github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8= github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/OpenPeeDeeP/depguard/v2 v2.2.0/go.mod h1:CIzddKRvLBC4Au5aYP/i3nyaWQ+ClszLIuVocRiCYFQ= github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= github.com/Shopify/sarama v1.29.0 h1:ARid8o8oieau9XrHI55f/L3EoRAhm9px6sonbD7yuUE= github.com/Shopify/sarama v1.29.0/go.mod h1:2QpgD79wpdAESqNQMxNc0KYMkycd4slxGdV3TWSVqrU= @@ -94,10 +220,15 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpH github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/alecthomas/go-check-sumtype v0.1.4/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ= +github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA= github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= 
+github.com/alexkohler/nakedret/v2 v2.0.2/go.mod h1:2b8Gkk0GsOrqQv/gPWjNLDSKwG8I5moSXG1K4VIBcTQ= +github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= +github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1581 h1:Q/yk4z/cHUVZfgTqtD09qeYBxHwshQAjVRX73qs8UH0= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1581/go.mod h1:RcDobYh8k5VP6TNybz9m++gL3ijVI5wueVr0EM10VsU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -108,8 +239,11 @@ github.com/apache/thrift v0.13.1-0.20201008052519-daf620915714/go.mod h1:cp2SuWM github.com/apache/thrift v0.16.0 h1:qEy6UW60iVOlUy+b9ZR0d5WzUWYGOo4HfopoyBaNmoY= github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= github.com/aws/aws-sdk-go v1.30.19/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= @@ -126,21 +260,31 @@ github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod 
h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM= github.com/blacktear23/go-proxyprotocol v1.0.6 h1:eTt6UMpEnq59NjON49b3Cay8Dm0sCs1nDliwgkyEsRM= github.com/blacktear23/go-proxyprotocol v1.0.6/go.mod h1:FSCbgnRZrQXazBLL5snfBbrcFSMtcmUDhSRb9OfFA1o= +github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= github.com/bmatcuk/doublestar/v2 v2.0.4 h1:6I6oUiT/sU27eE2OFcWqBhL1SwjyvQuOssxT4a1yidI= github.com/bmatcuk/doublestar/v2 v2.0.4/go.mod h1:QMmcs3H2AUQICWhfzLXz+IYln8lRQmTZRptLie8RgRw= +github.com/bombsimon/wsl/v4 v4.2.1/go.mod h1:Xu/kDxGZTofQcDGCtQe9KCzhHphIe0fDuyWTxER9Feo= +github.com/breml/bidichk v0.2.7/go.mod h1:YodjipAGI9fGcYM7II6wFvGhdMYsC5pHDlGzqvEW3tQ= +github.com/breml/errchkjson v0.3.6/go.mod h1:jhSDoFheAF2RSDOlCfhHO9KqhZgAYLyvHe7bRCX8f/U= +github.com/butuzov/ireturn v0.3.0/go.mod h1:A09nIiwiqzN/IoVo9ogpa0Hzi9fex1kd9PSD6edP5ZA= github.com/butuzov/mirror v1.1.0 h1:ZqX54gBVMXu78QLoiqdwpl2mgmoOJTk7s4p4o+0avZI= github.com/butuzov/mirror v1.1.0/go.mod h1:8Q0BdQU6rC6WILDiBM60DBfvV78OLJmMmixe7GF45AE= github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5 h1:BjkPE3785EwPhhyuFkbINB+2a1xATwk8SNDWnJiD41g= github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5/go.mod h1:jtAfVaU/2cu1+wdSRPWE2c1N2qeAA3K4RH9pYgqwets= github.com/carlmjohnson/flagext v0.21.0 h1:/c4uK3ie786Z7caXLcIMvePNSSiH3bQVGDvmGLMme60= github.com/carlmjohnson/flagext v0.21.0/go.mod h1:Eenv0epIUAr4NuedNmkzI8WmBmjIxZC239XcKxYS2ac= 
+github.com/catenacyber/perfsprint v0.6.0/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50= +github.com/ccojocar/zxcvbn-go v1.0.2/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= @@ -153,13 +297,19 @@ github.com/cheggaaa/pb/v3 v3.0.8 h1:bC8oemdChbke2FHIIGy9mn4DPJ2caZYQnfbRqwmdCoA= github.com/cheggaaa/pb/v3 v3.0.8/go.mod h1:UICbiLec/XO6Hw6k+BHEtHeQFzzBH4i2/qk/ow1EJTA= github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4YrOU54= github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys= +github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs= +github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs= +github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.11.0/go.mod 
h1:WE7CZAnqOL2RouJ4f1uyNhqr2P4CCvXFIqdRDUgWsVs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudfoundry/gosigar v1.3.6 h1:gIc08FbB3QPb+nAQhINIK/qhf5REKkY0FTGgRGXkcVc= github.com/cloudfoundry/gosigar v1.3.6/go.mod h1:lNWstu5g5gw59O09Y+wsMNFzBSnU8a0u+Sfx4dq360E= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= github.com/cockroachdb/datadriven v1.0.0/go.mod h1:5Ib8Meh+jk1RlHIXej6Pzevx/NLlNvQB9pmSBZErGA4= @@ -179,6 +329,8 @@ github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/colinmarc/hdfs/v2 v2.1.1/go.mod h1:M3x+k8UKKmxtFu++uAZ0OtDU8jR3jnaZIAc6yK4Ue0c= +github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/coocood/bbloom v0.0.0-20190830030839-58deb6228d64 h1:W1SHiII3e0jVwvaQFglwu3kS9NLxOeTpvik7MbKCyuQ= github.com/coocood/bbloom v0.0.0-20190830030839-58deb6228d64/go.mod h1:F86k/6c7aDUdwSUevnLpHS/3Q9hzYCE99jGk2xsHnt0= github.com/coocood/freecache v1.2.1 h1:/v1CqMq45NFH9mp/Pt142reundeBM0dVUD3osQBeu/U= @@ -196,8 +348,12 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= 
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= +github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= +github.com/cznic/strutil v0.0.0-20181122101858-275e90344537/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= github.com/daixiang0/gci v0.12.1 h1:ugsG+KRYny1VK4oqrX4Vtj70bo4akYKa0tgT1DXMYiY= github.com/daixiang0/gci v0.12.1/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI= github.com/danjacques/gofslock v0.0.0-20191023191349-0a45f885bc37 h1:X6mKGhCFOxrKeeHAjv/3UvT6e5RRxW6wRdlqlV6/H4w= @@ -206,8 +362,10 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c= github.com/dennwc/varint v1.0.0 
h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= @@ -217,8 +375,12 @@ github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUn github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/digitalocean/godo v1.108.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH13wk3YhxycCs= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docker/docker v25.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dolthub/maphash v0.1.0 h1:bsQ7JsF4FkkWyrP3oCnFJgrCUAFbFf3kOl4L/QxPDyQ= @@ -234,16 +396,24 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= github.com/eknkc/amber 
v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= +github.com/etcd-io/gofail v0.0.0-20190801230047-ad7f989257ca/go.mod h1:49H/RkXP8pKaZy4h0d+NW16rSLhyVBt4o6VLJbmOqDE= +github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fatanugraha/noloopclosure v0.1.1 h1:AhepjAikNpk50qTZoipHZqeZtnyKT/C2Tk5dGn7nC+A= github.com/fatanugraha/noloopclosure v0.1.1/go.mod 
h1:Mi9CiG5QvEgvPLtZLsTzjYwjIDnWAbo10r0BG7JpJII= @@ -258,6 +428,7 @@ github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNu github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI= github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.6-0.20210809144907-32ab6a8243d7+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= @@ -273,19 +444,23 @@ github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyT github.com/fsouza/fake-gcs-server v1.44.0 h1:Lw/mrvs45AfCUPVpry6qFkZnZPqe9thpLQHW+ZwHRLs= github.com/fsouza/fake-gcs-server v1.44.0/go.mod h1:M02aKoTv9Tnlf+gmWnTok1PWVCUHDntVbHxpd0krTfo= github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= +github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghostiam/protogetter v0.3.4/go.mod h1:A0JgIhs0fgVnotGinjQiKaFVG3waItLJNwPmcMzDnvk= github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= github.com/go-asn1-ber/asn1-ber v1.5.4 
h1:vXT6d/FNDiELJnLb6hGNa309LMsrCoYFvpwHDF0+Y1A= github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-critic/go-critic v0.11.1/go.mod h1:aZVQR7+gazH6aDEQx4356SD7d8ez8MipYjXbEl5JAKA= github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= @@ -301,18 +476,43 @@ github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AE github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= +github.com/go-openapi/errors v0.21.0/go.mod h1:jxNTMUxRCKj65yb/okJGEtahVd7uvWnuWfj53bse4ho= +github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/loads v0.21.2/go.mod 
h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= +github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/strfmt v0.22.0/go.mod h1:HzJ9kokGIju3/K6ap8jL+OlGAbjpSv27135Yr9OivU4= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= github.com/go-resty/resty/v2 v2.11.0 h1:i7jMfNOJYMp69lq7qozJP+bjgzfAzeOhuGlyDrqxT/8= github.com/go-resty/resty/v2 v2.11.0/go.mod h1:iiP/OpA0CkcL3IGt1O0+/SIItFUbkkyw5BGXiVdTu+A= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU= +github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw= +github.com/go-toolsmith/astequal v1.2.0/go.mod h1:c8NZ3+kSFtFY/8lPso4v8LuJjdJiUFVnSuU3s0qrrDY= +github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4= +github.com/go-toolsmith/astp v1.1.0/go.mod h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA= +github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= +github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= +github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/go-zookeeper/zk v1.0.3/go.mod 
h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v0.0.0-20180717141946-636bf0302bc9/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -323,6 +523,7 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= +github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod 
h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= @@ -344,6 +545,7 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v0.0.0-20180814211427-aa810b61a9c7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -369,21 +571,29 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ= github.com/golangci/gofmt v0.0.0-20231019111953-be8c47862aaa h1:L0Zq43Px2HrLroRKEgfCsQLMJUkjskJBB1kd1Zjcvvc= github.com/golangci/gofmt v0.0.0-20231019111953-be8c47862aaa/go.mod h1:Pm5KhLPA8gSnQwrQ6ukebRcapGb/BG9iUkdaiCcGHJM= github.com/golangci/golangci-lint v1.56.2 h1:dgQzlWHgNbCqJjuxRJhFEnHDVrrjuTGQHJ3RIZMpp/o= github.com/golangci/golangci-lint v1.56.2/go.mod h1:7CfNO675+EY7j84jihO4iAqDQ80s3HCjcc5M6B7SlZQ= 
github.com/golangci/gosec v0.0.0-20180901114220-8afd9cbb6cfb h1:Bi7BYmZVg4C+mKGi8LeohcP2GGUl2XJD4xCkJoZSaYc= github.com/golangci/gosec v0.0.0-20180901114220-8afd9cbb6cfb/go.mod h1:ON/c2UR0VAAv6ZEAFKhjCLplESSmRFfZcDLASbI1GWo= +github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= github.com/golangci/misspell v0.4.1 h1:+y73iSicVy2PqyX7kmUefHusENlrP9YwuHZHPLGQj/g= github.com/golangci/misspell v0.4.1/go.mod h1:9mAN1quEo3DlpbaIKKyEvRxK1pwqR9s/Sea1bJCtlNI= github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21 h1:leSNB7iYzLYSSx3J/s5sVf4Drkc68W2wm4Ixh/mr0us= github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI= +github.com/golangci/revgrep v0.5.2/go.mod h1:bjAMA+Sh/QUfTDcHzxfyHxr4xKvllVr/0sCv2e7jJHA= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -400,6 +610,7 @@ github.com/google/go-cmp v0.5.9/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github/v33 v33.0.0/go.mod h1:GMdDnVZY/2TsWgp/lkYnpSAh6TrzhANBBwm6k6TTEXg= +github.com/google/go-pkcs11 v0.2.1-0.20230907215043-c6f79328ddf9/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -439,6 +650,7 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/gophercloud/gophercloud v1.8.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gordonklaus/ineffassign v0.1.0 h1:y2Gd/9I7MdY1oEIt+n+rowjBNDcLQq3RsH5hwJd0f9s= github.com/gordonklaus/ineffassign v0.1.0/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= @@ -458,6 +670,7 @@ github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= github.com/gostaticanalysis/forcetypeassert v0.1.0 h1:6eUflI3DiGusXGK6X7cCcIgVCpZ2CiZ1Q7jl6ZxNV70= github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= +github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= 
github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= @@ -471,6 +684,15 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= +github.com/hashicorp/consul/api v1.27.0/go.mod h1:JkekNRSou9lANFdt+4IKx3Za7XY0JzzpQjEb4Ivo1c8= +github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-uuid v0.0.0-20180228145832-27454136f036/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -480,7 +702,11 @@ github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mO 
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/nomad/api v0.0.0-20230721134942-515895c7690c/go.mod h1:O23qLAZuCx4htdY9zBaO4cJPXgleSFEdq6D/sezGgYE= +github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= +github.com/hetznercloud/hcloud-go/v2 v2.6.0/go.mod h1:4J1cSE57+g0WS93IiHLV7ubTHItcp+awzeBp5bM9mfA= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -489,6 +715,7 @@ github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= +github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= @@ -498,6 +725,7 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/influxdata/tdigest v0.0.1 h1:XpFptwYmnEKUqmkcDjrzffswZ3nvNeevbUSLPP/ZzIY= github.com/influxdata/tdigest v0.0.1/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y= +github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62A0xJL6I+umB2YTlFRwWXaDFA0jy+5HzGiJjqI= @@ -527,8 +755,11 @@ github.com/jfcg/sixb v1.3.8 h1:BKPp/mIFCkKnnqhbgasI4wO/BYas6NHNcUCowUfTzSI= github.com/jfcg/sixb v1.3.8/go.mod h1:UWrAr1q9s7pSPPqZNccmQM4N75p8GvuBYdFuq+09Qns= github.com/jfcg/sorty/v2 v2.1.0 h1:EjrVSL3cDRxBt/ehiYCIv10F7YHYbTzEmdv7WbkkN1k= github.com/jfcg/sorty/v2 v2.1.0/go.mod h1:JpcSKlmtGOOAGyTdWN2ErjvxeMSJVYBsylAKepIxmNg= +github.com/jgautheron/goconst v1.7.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= +github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= +github.com/jjti/go-spancheck v0.5.2/go.mod h1:ARPNI1JRG1V2Rjnd6/2f2NEfghjSVDZGVmruNKlnXU0= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -541,6 +772,7 @@ github.com/joho/sqltocsv v0.0.0-20210428211105-a6d6801d59df h1:Zrb0IbuLOGHL7nrO2 github.com/joho/sqltocsv 
v0.0.0-20210428211105-a6d6801d59df/go.mod h1:mAVCUAYtW9NG31eB30umMSLKcDt6mCUWSjoSn5qBh0k= github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -553,6 +785,8 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk= @@ -565,6 +799,7 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/errcheck v1.7.0 h1:+SbscKmWJ5mOK/bO1zS60F5I9WwZDWOfRsC4RwfwRV0= github.com/kisielk/errcheck v1.7.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkHAIKE/contextcheck v1.1.4/go.mod 
h1:1+i/gWqokIa+dm31mqGLZhZJ7Uh44DJGZVmr6QRBNJg= github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= @@ -577,6 +812,7 @@ github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6K github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s= github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -588,12 +824,17 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/ks3sdklib/aws-sdk-go v1.2.9 h1:Eg0fM56r4Gjp9PiK1Bg9agJUxCAWCk236qq9DItfLcw= github.com/ks3sdklib/aws-sdk-go v1.2.9/go.mod h1:xBNbOrxSnd36AQpZ8o99mGGu+blblUd9rI0MKGmeufo= +github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= +github.com/kunwardeep/paralleltest v1.0.9/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ= github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA= 
github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= +github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4= +github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= github.com/lestrrat-go/blackmagic v1.0.2 h1:Cg2gVSc9h7sz9NOByczrbUvLopQmXrfFx//N+AkAr5k= github.com/lestrrat-go/blackmagic v1.0.2/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU= github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE= @@ -606,10 +847,18 @@ github.com/lestrrat-go/jwx/v2 v2.0.21 h1:jAPKupy4uHgrHFEdjVjNkUgoBKtVDgrQPB/h55F github.com/lestrrat-go/jwx/v2 v2.0.21/go.mod h1:09mLW8zto6bWL9GbwnqAli+ArLf+5M33QLQPDggkUWM= github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU= github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= +github.com/linode/linodego v1.27.1/go.mod h1:5oAsx+uinHtVo6U77nXXXtox7MWzUW6aEkTOKXxA9uo= +github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lufia/plan9stats v0.0.0-20230326075908-cb1d2100619a h1:N9zuLhTvBSRt0gWSiJswwQ2HqDmtX/ZCDJURnKUt1Ik= github.com/lufia/plan9stats v0.0.0-20230326075908-cb1d2100619a/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/macabu/inamedparam v0.1.3/go.mod h1:93FLICAIk/quk7eaPPQvbzihUdn/QkGDwIZEoLtpH6I= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.7.7/go.mod 
h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE= +github.com/maratori/testpackage v1.1.1/go.mod h1:s4gRK/ym6AMrqpOa/kEbQTV4Q4jb7WeLZzVhVVVOQMc= +github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= @@ -628,27 +877,40 @@ github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZ github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg= github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ= +github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= github.com/mgechev/revive v1.3.7 h1:502QY0vQGe9KtYJ9FpxMz9rL+Fc/P13CI5POL4uHCcE= github.com/mgechev/revive v1.3.7/go.mod h1:RJ16jUbF0OWC3co/+XTxmFNgEpUPwnnA0BRllX2aDNA= github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= +github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod 
h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/moricho/tparallel v0.3.1/go.mod h1:leENX2cUv7Sv2qDgdi0D0fCftN8fRC67Bcn8pqzeYNI= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod 
h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM= github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= @@ -662,11 +924,15 @@ github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7/go.mod h1:iWMfgwqYW+e8 github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef h1:K0Fn+DoFqNqktdZtdV3bPQ/0cuYh2H4rkg0tytX/07k= github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef/go.mod h1:7WjlapSfwQyo6LNmIvEWzsW1hbBQfpUO4JWnuQRmva8= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs= github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= +github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8= +github.com/nunnatsa/ginkgolinter v0.15.2/go.mod h1:oYxE7dt1vZI8cK2rZOs3RgTaBN2vggkqnENmoJ8kVvc= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/run v1.1.0/go.mod 
h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= @@ -676,10 +942,14 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108 github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing/basictracer-go v1.0.0 h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7lZWlQw5UXuoo= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -692,8 +962,12 @@ github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6 github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= github.com/otiai10/mint v1.3.1 h1:BCmzIS3n71sGfHB5NMNDB3lHYPz8fWSkCAErHed//qc= github.com/otiai10/mint v1.3.1/go.mod 
h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= +github.com/ovh/go-ovh v1.4.3/go.mod h1:AkPXVtgwB6xlKblMjRKJJmjRp+ogrE7fz2lVgcQY8SY= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pborman/getopt v0.0.0-20180729010549-6fdd0a2c7117/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= github.com/petermattis/goid v0.0.0-20231207134359-e60b3f734c67 h1:jik8PHtAIsPlCRJjJzl4udgEf7hawInF9texMeO2jrU= github.com/petermattis/goid v0.0.0-20231207134359-e60b3f734c67/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= @@ -738,11 +1012,13 @@ github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6kt github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polyfloyd/go-errorlint v1.4.8/go.mod h1:NNCxFcFjZcw3xNjVdCchERkEM6Oz7wta2XJVxRftwO4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b h1:0LFwY6Q3gMACTjAbMZBjXAqTOzOwFaj2Ld6cjeQ7Rig= github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub 
v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= +github.com/prometheus/alertmanager v0.26.0/go.mod h1:rVcnARltVjavgVaNnmevxK7kOn7IZavyf0KNgHkbEpU= github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= @@ -753,13 +1029,20 @@ github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOA github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.51.0 h1:vT5R9NAlW4V6k8Wruk7ikrHaHRsrPbduM/cKTOdQM/k= github.com/prometheus/common v0.51.0/go.mod h1:wHFBCEVWVmHMUpg7pYcOm2QUR/ocQdYSJVQJKnHc3xQ= +github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= +github.com/prometheus/exporter-toolkit v0.11.0/go.mod h1:BVnENhnNecpwoTLiABx7mrPB/OLRIgN74qlQbV+FK1Q= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o= github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g= github.com/prometheus/prometheus v0.50.1 h1:N2L+DYrxqPh4WZStU+o1p/gQlBaqFbcLBTjlp3vpdXw= github.com/prometheus/prometheus v0.50.1/go.mod h1:FvE8dtQ1Ww63IlyKBn1V4s+zMwF9kHkVNkQBR1pM4CU= +github.com/quasilyte/go-ruleguard v0.4.0/go.mod h1:Eu76Z/R8IXtViWUIHkE3p8gdH3/PKk1eh3YGfaEof10= +github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/gogrep v0.5.0/go.mod 
h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= +github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= @@ -777,19 +1060,27 @@ github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDN github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryancurrah/gomodguard v1.3.0/go.mod h1:ggBxb3luypPEzqVtq33ee7YSN35V28XeGnid8dnni50= +github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8= +github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= +github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= +github.com/sashamelentyev/usestdlibvars 
v1.25.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8= github.com/scalalang2/golang-fifo v0.1.5 h1:cl70TQhlMGGpI2DZGcr+7/GFTJOjHMeor0t7wynEEoA= github.com/scalalang2/golang-fifo v0.1.5/go.mod h1:IK3OZBg7iHbVdQVGPDjcW1MWPb6JcWjaS/w0iRBS8gs= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.22/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= +github.com/securego/gosec/v2 v2.19.0/go.mod h1:hOkDcHz9J/XIgIlPDXalxjeVYsHxoWUc5zJSHxcB8YM= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500 h1:WnNuhiq+FOY3jNj6JXFT+eLN3CQ/oPIsDPRanvwsmbI= github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500/go.mod h1:+njLrG5wSeoG4Ds61rFgEzKvenR2UHbjMoDHsczxly0= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= github.com/shirou/gopsutil/v3 v3.21.12/go.mod h1:BToYZVTlSVlfazpDDYFnsVZLaoRG+g8ufT6fPQLdJzA= github.com/shirou/gopsutil/v3 v3.24.1 h1:R3t6ondCEvmARp3wxODhXMTLC/klMa87h2PHUw5m7QI= github.com/shirou/gopsutil/v3 v3.24.1/go.mod h1:UU7a2MSBQa+kW1uuDq8DeEBS8kmrnQwsv2b5O513rwU= @@ -810,17 +1101,23 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= +github.com/sivchari/nosnakecase v1.7.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY= 
+github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo= github.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0 h1:IJ3DuWHPTJrsqtIqjfdmPTELdTFGefvrOa2eTeRBleQ= github.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:V952P4GGl1v/MMynLwxVdWEbSZJx+n0oOO3ljnez+WU= github.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67 h1:8ZnTA26bBOoPkAbbitKPgNlpw0Bwt7ZlpYgZWHWJR/w= github.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67/go.mod h1:tNZjgbYncKL5HxvDULAr/mWDmFz4B7H8yrXEDlnoIiw= +github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= @@ -830,14 +1127,18 @@ github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= 
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= github.com/spkg/bom v1.0.0 h1:S939THe0ukL5WcTGiGqkgtaW5JW+O6ITaIlpJXTYY64= github.com/spkg/bom v1.0.0/go.mod h1:lAz2VbTuYNcvs7iaFF8WW0ufXrHShJ7ck1fYFFbVXJs= +github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/stathat/consistent v1.0.0 h1:ZFJ1QTRn8npNBKW065raSZ8xfOqhpb8vLOkfp4CcL/U= github.com/stathat/consistent v1.0.0/go.mod h1:uajTPbgSygZBJ+V+0mY7meZ8i0XAcZs7AQ6V121XSxw= +github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -857,12 +1158,15 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk= 
github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9qkHM= github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg= github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= +github.com/tetafro/godot v1.4.16/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 h1:mbAskLJ0oJfDRtkanvQPiooDH8HvJ2FBh+iKT/OmiQQ= github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfKggNGDuadAa0LElHrByyrz4JPZ9fFx6Gs7nx7ZZU= github.com/tiancaiamao/gp v0.0.0-20221230034425-4025bc8a4d4a h1:J/YdBZ46WKpXsxsW93SG+q0F8KI+yFrcIDT4c/RNoc4= @@ -875,6 +1179,7 @@ github.com/tikv/pd/client v0.0.0-20240322051414-fb9e2d561b6e h1:u2OoEvmh3qyjIiAK github.com/tikv/pd/client v0.0.0-20240322051414-fb9e2d561b6e/go.mod h1:Z/QAgOt29zvwBTd0H6pdx45VO6KRNc/O/DzGkVmSyZg= github.com/timakin/bodyclose v0.0.0-20240125160201-f835fa56326a h1:A6uKudFIfAEpoPdaal3aSqGxBzLyU8TqyXImLwo6dIo= github.com/timakin/bodyclose v0.0.0-20240125160201-f835fa56326a/go.mod h1:mkjARE7Yr8qU23YcGMSALbIxTQ9r9QBVahQOBRfU460= +github.com/timonwong/loggercheck v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg= github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= @@ -883,6 +1188,8 @@ github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+F github.com/tklauser/numcpus v0.6.1/go.mod 
h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= +github.com/tomarrell/wrapcheck/v2 v2.8.1/go.mod h1:/n2Q3NZ4XFT50ho6Hbxg+RV1uyo2Uow/Vdm9NQcl5SE= +github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg= github.com/twmb/murmur3 v1.1.6/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= github.com/uber/jaeger-client-go v2.22.1+incompatible h1:NHcubEkVbahf9t3p75TOCR83gdUHXjRJvjoBh1yACsM= @@ -891,13 +1198,18 @@ github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVK github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4= +github.com/ultraware/whitespace v0.1.0/go.mod h1:/se4r3beMFNmewJ4Xmz0nMQ941GJt+qmSHGP9emHYe0= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/uudashr/gocognit v1.1.2/go.mod h1:aAVdLURqcanke8h3vg35BC++eseDm66Z7KmchI5et4k= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod 
h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/vbauerster/mpb/v7 v7.5.3 h1:BkGfmb6nMrrBQDFECR/Q7RkKCw7ylMetCb4079CGs4w= github.com/vbauerster/mpb/v7 v7.5.3/go.mod h1:i+h4QY6lmLvBNK2ah1fSreiw3ajskRlBp9AhY/PnuOE= +github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= github.com/wangjohn/quickselect v0.0.0-20161129230411-ed8402a42d5f h1:9DDCDwOyEy/gId+IEMrFHLuQ5R/WV0KNxWLler8X2OY= github.com/wangjohn/quickselect v0.0.0-20161129230411-ed8402a42d5f/go.mod h1:8sdOQnirw1PrcnTJYkmW1iOHtUmblMmGdUOHyWYycLI= github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= @@ -905,6 +1217,8 @@ github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg= +github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk= github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xitongsys/parquet-go v1.5.1/go.mod h1:xUxwM8ELydxh4edHGegYq1pA8NnMKDx0K/GyB0o2bww= @@ -914,7 +1228,10 @@ github.com/xitongsys/parquet-go-source v0.0.0-20190524061010-2b72cbee77d5/go.mod github.com/xitongsys/parquet-go-source v0.0.0-20200817004010-026bad9b25d0 h1:a742S4V5A15F93smuVxA60LQWsrCnN8bKeWDBARU1/k= github.com/xitongsys/parquet-go-source v0.0.0-20200817004010-026bad9b25d0/go.mod h1:HYhIKsdns7xz80OgkbgJYrtQY7FjHWHKH6cvN7+czGE= github.com/xordataexchange/crypt 
v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA= +github.com/ykadowak/zerologlint v0.1.5/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg= github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= @@ -928,6 +1245,9 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5t github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +gitlab.com/bosi/decorder v0.4.1/go.mod h1:jecSqWUew6Yle1pCr2eLWTensJMmsxHsBwt+PVbkAqA= +go-simpler.org/musttag v0.8.0/go.mod h1:fiNdCkXt2S6je9Eblma3okjnlva9NT1Eg/WUt19rWu8= +go-simpler.org/sloglint v0.4.0/go.mod h1:v6zJ++j/thFPhefs2wEXoCKwT10yo5nkBDYRCXyqgNQ= go.einride.tech/aip v0.66.0 h1:XfV+NQX6L7EOYK11yoHHFtndeaWh3KbD9/cN/6iWEt8= go.einride.tech/aip v0.66.0/go.mod h1:qAhMsfT7plxBX+Oy7Huol6YUvZ0ZzdUz26yZsQwfl1M= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= @@ -941,6 +1261,7 @@ go.etcd.io/etcd/client/v2 v2.305.12 h1:0m4ovXYo1CHaA/Mp3X/Fak5sRNIWf01wk/X1/G3sG go.etcd.io/etcd/client/v2 v2.305.12/go.mod h1:aQ/yhsxMu+Oht1FOupSr60oBvcS9cKXHrzBpDsPTf9E= go.etcd.io/etcd/client/v3 v3.5.12 h1:v5lCPXn1pf1Uu3M4laUE2hp/geOTc5uPcYYsNe1lDxg= go.etcd.io/etcd/client/v3 v3.5.12/go.mod 
h1:tSbBCakoWmmddL+BKVAJHa9km+O/E+bumDe9mSbPiqw= +go.etcd.io/etcd/etcdutl/v3 v3.5.12/go.mod h1:U023wujJQo/2EeSrjPDnmFdCX6TC6Q6W9pAvuWKaaJE= go.etcd.io/etcd/pkg/v3 v3.5.12 h1:OK2fZKI5hX/+BTK76gXSTyZMrbnARyX9S643GenNGb8= go.etcd.io/etcd/pkg/v3 v3.5.12/go.mod h1:UVwg/QIMoJncyeb/YxvJBJCE/NEwtHWashqc8A1nj/M= go.etcd.io/etcd/raft/v3 v3.5.12 h1:7r22RufdDsq2z3STjoR7Msz6fYH8tmbkdheGfwJNRmU= @@ -949,6 +1270,8 @@ go.etcd.io/etcd/server/v3 v3.5.12 h1:EtMjsbfyfkwZuA2JlKOiBfuGkFCekv5H178qjXypbG8 go.etcd.io/etcd/server/v3 v3.5.12/go.mod h1:axB0oCjMy+cemo5290/CutIjoxlfA6KVYKD1w0uue10= go.etcd.io/etcd/tests/v3 v3.5.12 h1:k1fG7+F87Z7zKp57EcjXu9XgOsW0sfp5USqfzmMTIwM= go.etcd.io/etcd/tests/v3 v3.5.12/go.mod h1:CLWdnlr8bWNa8tjkmKFybPz5Ldjh9GuHbYhq1g9vpIo= +go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= +go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -956,6 +1279,9 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/collector/featuregate v1.0.1/go.mod h1:QQXjP4etmJQhkQ20j4P/rapWuItYxoFozg/iIwuKnYg= +go.opentelemetry.io/collector/pdata v1.0.1/go.mod h1:jutXeu0QOXYY8wcZ/hege+YAnSBP3+jpTqYU1+JTI5Y= +go.opentelemetry.io/collector/semconv v0.93.0/go.mod h1:gZ0uzkXsN+J5NpiRcdp9xOhNGQDDui8Y62p15sKrlzo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs= 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08= @@ -966,6 +1292,7 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYa go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 h1:H2JFgRcGiyHg7H7bwcwaQJYrNFqCqrbTQ8K4p1OvDu8= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0/go.mod h1:WfCWp1bGoYK8MeULtI15MmQVczfR+bFkk0DF3h06QmQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0/go.mod h1:hYwym2nDEeZfG/motx0p7L7J1N1vyzIThemQsb4g2qY= go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg= go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= @@ -1055,6 +1382,7 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -1226,6 +1554,7 @@ 
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/telemetry v0.0.0-20240208230135-b75ee8823808/go.mod h1:KG1lNk5ZFNssSZLrpVb4sMXKMpGwGXOxSG3rnu2gZQQ= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1404,6 +1733,7 @@ google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJ google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8 h1:8eadJkXbwDEMNwcB5O0s5Y5eCfyuCLdvaiOIaGTrWmQ= google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:SCz6T5xjNXM4QFPRwxHcfChp7V+9DcXR3ay2TkHR8Tg= google.golang.org/genproto/googleapis/rpc v0.0.0-20240308144416-29370a3891b7 h1:em/y72n4XlYRtayY/cVj6pnVzHa//BDA1BdoO+z9mdE= google.golang.org/genproto/googleapis/rpc v0.0.0-20240308144416-29370a3891b7/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs= google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= @@ -1455,6 +1785,7 @@ gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/R gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.66.2/go.mod 
h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= @@ -1479,6 +1810,7 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20220512140231-539c8e751b99/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1494,10 +1826,22 @@ k8s.io/apimachinery v0.28.6 h1:RsTeR4z6S07srPg6XYrwXpTJVMXsjPXn0ODakMytSW0= k8s.io/apimachinery v0.28.6/go.mod h1:QFNX/kCl/EMT2WTSz8k4WLCv2XnkOLMaL8GAVRMdpsA= k8s.io/client-go v0.28.6 h1:Gge6ziyIdafRchfoBKcpaARuz7jfrK1R1azuwORIsQI= k8s.io/client-go v0.28.6/go.mod h1:+nu0Yp21Oeo/cBCsprNVXB2BfJTV51lFfe5tXl2rUL8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod 
h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +modernc.org/golex v1.1.0/go.mod h1:2pVlfqApurXhR1m0N+WDYu6Twnc4QuvO4+U8HnwoiRA= +modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= +modernc.org/parser v1.1.0/go.mod h1:CXl3OTJRZij8FeMpzI3Id/bjupHf0u9HSrCUP4Z9pbA= +modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss= +modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= +modernc.org/y v1.1.0/go.mod h1:Iz3BmyIS4OwAbwGaUS7cqRrLsSsfp2sFWtpzX+P4CsE= +mvdan.cc/gofumpt v0.6.0/go.mod h1:4L0wf+kgIPZtcCWXynNS2e6bhmj73umwnuXSZarixzA= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= +mvdan.cc/unparam v0.0.0-20240104100049-c549a3470d14/go.mod h1:ZzZjEpJDOmx8TdVU6umamY3Xy0UAQUI2DHbf05USVbI= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/pkg/planner/core/common_plans.go b/pkg/planner/core/common_plans.go index 65a1ba944c42f..a6e1b19cb7380 100644 --- a/pkg/planner/core/common_plans.go +++ b/pkg/planner/core/common_plans.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/property" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/statistics" "github.com/pingcap/tidb/pkg/table" @@ -858,16 +859,16 @@ func (e *Explain) RenderResult() error { pp, ok := e.TargetPlan.(PhysicalPlan) if ok { if _, err := getPlanCost(pp, property.RootTaskType, - NewDefaultPlanCostOption().WithCostFlag(CostFlagRecalculate|CostFlagUseTrueCardinality|CostFlagTrace)); err != nil { 
+ coreusage.NewDefaultPlanCostOption().WithCostFlag(coreusage.CostFlagRecalculate|coreusage.CostFlagUseTrueCardinality|coreusage.CostFlagTrace)); err != nil { return err } if pp.SCtx().GetSessionVars().CostModelVersion == modelVer2 { // output cost formula and factor costs through warning under model ver2 and true_card_cost mode for cost calibration. - cost, _ := pp.getPlanCostVer2(property.RootTaskType, NewDefaultPlanCostOption()) - if cost.trace != nil { - trace := cost.trace - pp.SCtx().GetSessionVars().StmtCtx.AppendWarning(errors.NewNoStackErrorf("cost formula: %v", trace.formula)) - data, err := json.Marshal(trace.factorCosts) + cost, _ := pp.GetPlanCostVer2(property.RootTaskType, coreusage.NewDefaultPlanCostOption()) + if cost.GetTrace() != nil { + trace := cost.GetTrace() + pp.SCtx().GetSessionVars().StmtCtx.AppendWarning(errors.NewNoStackErrorf("cost formula: %v", trace.GetFormula())) + data, err := json.Marshal(trace.GetFactorCosts()) if err != nil { pp.SCtx().GetSessionVars().StmtCtx.AppendWarning(errors.NewNoStackErrorf("marshal factor costs error %v", err)) } @@ -877,7 +878,7 @@ func (e *Explain) RenderResult() error { factors := defaultVer2Factors.tolist() weights := make(map[string]float64) for _, factor := range factors { - if factorCost, ok := trace.factorCosts[factor.Name]; ok && factor.Value > 0 { + if factorCost, ok := trace.GetFactorCosts()[factor.Name]; ok && factor.Value > 0 { weights[factor.Name] = factorCost / factor.Value // cost = [factors] * [weights] } } @@ -897,7 +898,7 @@ func (e *Explain) RenderResult() error { if pp, ok := e.TargetPlan.(PhysicalPlan); ok { // trigger getPlanCost again with CostFlagTrace to record all cost formulas if _, err := getPlanCost(pp, property.RootTaskType, - NewDefaultPlanCostOption().WithCostFlag(CostFlagRecalculate|CostFlagTrace)); err != nil { + coreusage.NewDefaultPlanCostOption().WithCostFlag(coreusage.CostFlagRecalculate|coreusage.CostFlagTrace)); err != nil { return err } } @@ -1142,15 +1143,15 @@ 
func (e *Explain) getOperatorInfo(p Plan, id string) (estRows, estCost, costForm estCost = "N/A" costFormula = "N/A" if isPhysicalPlan { - estRows = strconv.FormatFloat(pp.getEstRowCountForDisplay(), 'f', 2, 64) + estRows = strconv.FormatFloat(pp.GetEstRowCountForDisplay(), 'f', 2, 64) if e.SCtx() != nil && e.SCtx().GetSessionVars().CostModelVersion == modelVer2 { - costVer2, _ := pp.getPlanCostVer2(property.RootTaskType, NewDefaultPlanCostOption()) - estCost = strconv.FormatFloat(costVer2.cost, 'f', 2, 64) - if costVer2.trace != nil { - costFormula = costVer2.trace.formula + costVer2, _ := pp.GetPlanCostVer2(property.RootTaskType, coreusage.NewDefaultPlanCostOption()) + estCost = strconv.FormatFloat(costVer2.GetCost(), 'f', 2, 64) + if costVer2.GetTrace() != nil { + costFormula = costVer2.GetTrace().GetFormula() } } else { - planCost, _ := getPlanCost(pp, property.RootTaskType, NewDefaultPlanCostOption()) + planCost, _ := getPlanCost(pp, property.RootTaskType, coreusage.NewDefaultPlanCostOption()) estCost = strconv.FormatFloat(planCost, 'f', 2, 64) } } else if si := p.StatsInfo(); si != nil { @@ -1219,9 +1220,9 @@ func binaryOpTreeFromFlatOps(explainCtx PlanContext, ops FlatPlanTree) *tipb.Exp return &s[0] } -func binaryOpFromFlatOp(explainCtx PlanContext, op *FlatOperator, out *tipb.ExplainOperator) { - out.Name = op.Origin.ExplainID().String() - switch op.Label { +func binaryOpFromFlatOp(explainCtx PlanContext, fop *FlatOperator, out *tipb.ExplainOperator) { + out.Name = fop.Origin.ExplainID().String() + switch fop.Label { case BuildSide: out.Labels = []tipb.OperatorLabel{tipb.OperatorLabel_buildSide} case ProbeSide: @@ -1231,7 +1232,7 @@ func binaryOpFromFlatOp(explainCtx PlanContext, op *FlatOperator, out *tipb.Expl case RecursivePart: out.Labels = []tipb.OperatorLabel{tipb.OperatorLabel_recursivePart} } - switch op.StoreType { + switch fop.StoreType { case kv.TiDB: out.StoreType = tipb.StoreType_tidb case kv.TiKV: @@ -1239,10 +1240,10 @@ func 
binaryOpFromFlatOp(explainCtx PlanContext, op *FlatOperator, out *tipb.Expl case kv.TiFlash: out.StoreType = tipb.StoreType_tiflash } - if op.IsRoot { + if fop.IsRoot { out.TaskType = tipb.TaskType_root } else { - switch op.ReqType { + switch fop.ReqType { case Cop: out.TaskType = tipb.TaskType_cop case BatchCop: @@ -1252,16 +1253,16 @@ func binaryOpFromFlatOp(explainCtx PlanContext, op *FlatOperator, out *tipb.Expl } } - if op.IsPhysicalPlan { - p := op.Origin.(PhysicalPlan) - out.Cost, _ = getPlanCost(p, property.RootTaskType, NewDefaultPlanCostOption()) - out.EstRows = p.getEstRowCountForDisplay() - } else if statsInfo := op.Origin.StatsInfo(); statsInfo != nil { + if fop.IsPhysicalPlan { + p := fop.Origin.(PhysicalPlan) + out.Cost, _ = getPlanCost(p, property.RootTaskType, coreusage.NewDefaultPlanCostOption()) + out.EstRows = p.GetEstRowCountForDisplay() + } else if statsInfo := fop.Origin.StatsInfo(); statsInfo != nil { out.EstRows = statsInfo.RowCount } // Runtime info - rootStats, copStats, memTracker, diskTracker := getRuntimeInfo(explainCtx, op.Origin, nil) + rootStats, copStats, memTracker, diskTracker := getRuntimeInfo(explainCtx, fop.Origin, nil) if rootStats != nil { basic, groups := rootStats.MergeStats() if basic != nil { @@ -1291,14 +1292,14 @@ func binaryOpFromFlatOp(explainCtx PlanContext, op *FlatOperator, out *tipb.Expl } // Operator info - if plan, ok := op.Origin.(dataAccesser); ok { + if plan, ok := fop.Origin.(dataAccesser); ok { out.OperatorInfo = plan.OperatorInfo(false) } else { - out.OperatorInfo = op.Origin.ExplainInfo() + out.OperatorInfo = fop.Origin.ExplainInfo() } // Access object - switch p := op.Origin.(type) { + switch p := fop.Origin.(type) { case dataAccesser: ao := p.AccessObject() if ao != nil { diff --git a/pkg/planner/core/encode.go b/pkg/planner/core/encode.go index 46caf11d9188c..b11d8fff4da11 100644 --- a/pkg/planner/core/encode.go +++ b/pkg/planner/core/encode.go @@ -61,26 +61,26 @@ func EncodeFlatPlan(flat 
*FlatPhysicalPlan) string { buf.Grow(80 * opCount) encodeFlatPlanTree(flat.Main, 0, &buf) for _, cte := range flat.CTEs { - op := cte[0] + fop := cte[0] cteDef := cte[0].Origin.(*CTEDefinition) id := cteDef.CTE.IDForStorage tp := plancodec.TypeCTEDefinition - taskTypeInfo := plancodec.EncodeTaskType(op.IsRoot, op.StoreType) - p := op.Origin + taskTypeInfo := plancodec.EncodeTaskType(fop.IsRoot, fop.StoreType) + p := fop.Origin actRows, analyzeInfo, memoryInfo, diskInfo := getRuntimeInfoStr(p.SCtx(), p, nil) var estRows float64 - if op.IsPhysicalPlan { - estRows = op.Origin.(PhysicalPlan).getEstRowCountForDisplay() + if fop.IsPhysicalPlan { + estRows = fop.Origin.(PhysicalPlan).GetEstRowCountForDisplay() } else if statsInfo := p.StatsInfo(); statsInfo != nil { estRows = statsInfo.RowCount } plancodec.EncodePlanNode( - int(op.Depth), - strconv.Itoa(id)+op.Label.String(), + int(fop.Depth), + strconv.Itoa(id)+fop.Label.String(), tp, estRows, taskTypeInfo, - op.Origin.ExplainInfo(), + fop.Origin.ExplainInfo(), actRows, analyzeInfo, memoryInfo, @@ -96,23 +96,23 @@ func EncodeFlatPlan(flat *FlatPhysicalPlan) string { func encodeFlatPlanTree(flatTree FlatPlanTree, offset int, buf *bytes.Buffer) { for i := 0; i < len(flatTree); { - op := flatTree[i] - taskTypeInfo := plancodec.EncodeTaskType(op.IsRoot, op.StoreType) - p := op.Origin + fop := flatTree[i] + taskTypeInfo := plancodec.EncodeTaskType(fop.IsRoot, fop.StoreType) + p := fop.Origin actRows, analyzeInfo, memoryInfo, diskInfo := getRuntimeInfoStr(p.SCtx(), p, nil) var estRows float64 - if op.IsPhysicalPlan { - estRows = op.Origin.(PhysicalPlan).getEstRowCountForDisplay() + if fop.IsPhysicalPlan { + estRows = fop.Origin.(PhysicalPlan).GetEstRowCountForDisplay() } else if statsInfo := p.StatsInfo(); statsInfo != nil { estRows = statsInfo.RowCount } plancodec.EncodePlanNode( - int(op.Depth), - strconv.Itoa(op.Origin.ID())+op.Label.String(), - op.Origin.TP(), + int(fop.Depth), + 
strconv.Itoa(fop.Origin.ID())+fop.Label.String(), + fop.Origin.TP(), estRows, taskTypeInfo, - op.Origin.ExplainInfo(), + fop.Origin.ExplainInfo(), actRows, analyzeInfo, memoryInfo, @@ -120,16 +120,16 @@ func encodeFlatPlanTree(flatTree FlatPlanTree, offset int, buf *bytes.Buffer) { buf, ) - if op.NeedReverseDriverSide { + if fop.NeedReverseDriverSide { // If NeedReverseDriverSide is true, we don't rely on the order of flatTree. // Instead, we manually slice the build and probe side children from flatTree and recursively call // encodeFlatPlanTree to keep build side before probe side. - buildSide := flatTree[op.ChildrenIdx[1]-offset : op.ChildrenEndIdx+1-offset] - probeSide := flatTree[op.ChildrenIdx[0]-offset : op.ChildrenIdx[1]-offset] - encodeFlatPlanTree(buildSide, op.ChildrenIdx[1], buf) - encodeFlatPlanTree(probeSide, op.ChildrenIdx[0], buf) + buildSide := flatTree[fop.ChildrenIdx[1]-offset : fop.ChildrenEndIdx+1-offset] + probeSide := flatTree[fop.ChildrenIdx[0]-offset : fop.ChildrenIdx[1]-offset] + encodeFlatPlanTree(buildSide, fop.ChildrenIdx[1], buf) + encodeFlatPlanTree(probeSide, fop.ChildrenIdx[0], buf) // Skip the children plan tree of the current operator. - i = op.ChildrenEndIdx + 1 - offset + i = fop.ChildrenEndIdx + 1 - offset } else { // Normally, we just go to the next element in the slice. 
i++ @@ -210,7 +210,7 @@ func (pn *planEncoder) encodePlan(p Plan, isRoot bool, store kv.StoreType, depth actRows, analyzeInfo, memoryInfo, diskInfo := getRuntimeInfoStr(p.SCtx(), p, nil) rowCount := 0.0 if pp, ok := p.(PhysicalPlan); ok { - rowCount = pp.getEstRowCountForDisplay() + rowCount = pp.GetEstRowCountForDisplay() } else if statsInfo := p.StatsInfo(); statsInfo != nil { rowCount = statsInfo.RowCount } @@ -283,12 +283,12 @@ func NormalizeFlatPlan(flat *FlatPhysicalPlan) (normalized string, digest *parse }() // assume an operator costs around 30 bytes, preallocate space for them d.buf.Grow(30 * len(selectPlan)) - for _, op := range selectPlan { - taskTypeInfo := plancodec.EncodeTaskTypeForNormalize(op.IsRoot, op.StoreType) - p := op.Origin.(PhysicalPlan) + for _, fop := range selectPlan { + taskTypeInfo := plancodec.EncodeTaskTypeForNormalize(fop.IsRoot, fop.StoreType) + p := fop.Origin.(PhysicalPlan) plancodec.NormalizePlanNode( - int(op.Depth-uint32(selectPlanOffset)), - op.Origin.TP(), + int(fop.Depth-uint32(selectPlanOffset)), + fop.Origin.TP(), taskTypeInfo, p.ExplainNormalizedInfo(), &d.buf, diff --git a/pkg/planner/core/exhaust_physical_plans.go b/pkg/planner/core/exhaust_physical_plans.go index 9c4cedf0901a0..71c45e3edf8f7 100644 --- a/pkg/planner/core/exhaust_physical_plans.go +++ b/pkg/planner/core/exhaust_physical_plans.go @@ -462,7 +462,7 @@ func (p *LogicalJoin) getHashJoin(prop *property.PhysicalProperty, innerIdx int, func (p *LogicalJoin) constructIndexJoin( prop *property.PhysicalProperty, outerIdx int, - innerTask task, + innerTask Task, ranges ranger.MutableRanges, keyOff2IdxOff []int, path *util.AccessPath, @@ -576,7 +576,7 @@ func (p *LogicalJoin) constructIndexJoin( func (p *LogicalJoin) constructIndexMergeJoin( prop *property.PhysicalProperty, outerIdx int, - innerTask task, + innerTask Task, ranges ranger.MutableRanges, keyOff2IdxOff []int, path *util.AccessPath, @@ -683,7 +683,7 @@ func (p *LogicalJoin) constructIndexMergeJoin( func 
(p *LogicalJoin) constructIndexHashJoin( prop *property.PhysicalProperty, outerIdx int, - innerTask task, + innerTask Task, ranges ranger.MutableRanges, keyOff2IdxOff []int, path *util.AccessPath, @@ -831,7 +831,7 @@ func (p *LogicalJoin) buildIndexJoinInner2TableScan( keyOff2IdxOff := make([]int, len(innerJoinKeys)) newOuterJoinKeys := make([]*expression.Column, 0) var ranges ranger.MutableRanges = ranger.Ranges{} - var innerTask, innerTask2 task + var innerTask, innerTask2 Task var helper *indexJoinBuildHelper if ds.tableInfo.IsCommonHandle { helper, keyOff2IdxOff = p.getIndexJoinBuildHelper(ds, innerJoinKeys, func(path *util.AccessPath) bool { return path.IsCommonHandlePath }, outerJoinKeys) @@ -1023,7 +1023,7 @@ func (p *LogicalJoin) constructInnerTableScanTask( keepOrder bool, desc bool, rowCount float64, -) task { +) Task { ds := wrapper.ds // If `ds.tableInfo.GetPartitionInfo() != nil`, // it means the data source is a partition table reader. @@ -1088,9 +1088,9 @@ func (p *LogicalJoin) constructInnerTableScanTask( ts.PlanPartInfo = copTask.physPlanPartInfo selStats := ts.StatsInfo().Scale(selectivity) ts.addPushedDownSelection(copTask, selStats) - t := copTask.convertToRootTask(ds.SCtx()) - reader := t.p - t.p = p.constructInnerByWrapper(wrapper, reader) + t := copTask.ConvertToRootTask(ds.SCtx()) + reader := t.GetPlan() + t.SetPlan(p.constructInnerByWrapper(wrapper, reader)) return t } @@ -1209,7 +1209,7 @@ func (p *LogicalJoin) constructInnerIndexScanTask( desc bool, rowCount float64, maxOneRow bool, -) task { +) Task { ds := wrapper.ds // If `ds.tableInfo.GetPartitionInfo() != nil`, // it means the data source is a partition table reader. 
@@ -1376,9 +1376,9 @@ func (p *LogicalJoin) constructInnerIndexScanTask( } finalStats := ds.tableStats.ScaleByExpectCnt(rowCount) is.addPushedDownSelection(cop, ds, tmpPath, finalStats) - t := cop.convertToRootTask(ds.SCtx()) - reader := t.p - t.p = p.constructInnerByWrapper(wrapper, reader) + t := cop.ConvertToRootTask(ds.SCtx()) + reader := t.GetPlan() + t.SetPlan(p.constructInnerByWrapper(wrapper, reader)) return t } @@ -2565,7 +2565,7 @@ func (p *LogicalProjection) TryToGetChildProp(prop *property.PhysicalProperty) ( return newProp, true } -// exhaustPhysicalPlans enumerate all the possible physical plan for expand operator (currently only mpp case is supported) +// exhaustPhysicalPlans enumerate all the possible physical plan for expand operator (currently only mpp case is supported) func (p *LogicalExpand) exhaustPhysicalPlans(prop *property.PhysicalProperty) ([]PhysicalPlan, bool, error) { // under the mpp task type, if the sort item is not empty, refuse it, cause expanded data doesn't support any sort items. if !prop.IsSortItemEmpty() { @@ -2942,9 +2942,9 @@ func (lw *LogicalWindow) exhaustPhysicalPlans(prop *property.PhysicalProperty) ( return windows, true, nil } -// exhaustPhysicalPlans is only for implementing interface. DataSource and Dual generate task in `findBestTask` directly. +// exhaustPhysicalPlans is only for implementing interface. DataSource and Dual generate task in `findBestTask` directly. func (*baseLogicalPlan) exhaustPhysicalPlans(*property.PhysicalProperty) ([]PhysicalPlan, bool, error) { - panic("baseLogicalPlan.exhaustPhysicalPlans() should never be called.") + panic("baseLogicalPlan.exhaustPhysicalPlans() should never be called.") } // canPushToCop checks if it can be pushed to some stores. For TiKV, it only checks datasource. 
diff --git a/pkg/planner/core/find_best_task.go b/pkg/planner/core/find_best_task.go index 8093e2642d9c2..382b50d17157a 100644 --- a/pkg/planner/core/find_best_task.go +++ b/pkg/planner/core/find_best_task.go @@ -32,6 +32,7 @@ import ( "github.com/pingcap/tidb/pkg/planner/cardinality" "github.com/pingcap/tidb/pkg/planner/property" "github.com/pingcap/tidb/pkg/planner/util" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/planner/util/fixcontrol" "github.com/pingcap/tidb/pkg/statistics" "github.com/pingcap/tidb/pkg/types" @@ -102,7 +103,7 @@ func (c *PlanCounterTp) IsForce() bool { return *c != -1 } -var invalidTask = &rootTask{p: nil} // invalid if p is nil +var invalidTask = &RootTask{} // invalid if p is nil // GetPropByOrderByItems will check if this sort property can be pushed or not. In order to simplify the problem, we only // consider the case that all expression are columns. @@ -141,7 +142,7 @@ func GetPropByOrderByItemsContainScalarFunc(items []*util.ByItems) (*property.Ph return &property.PhysicalProperty{SortItems: propItems}, true, onlyColumn } -func (p *LogicalTableDual) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, opt *physicalOptimizeOp) (task, int64, error) { +func (p *LogicalTableDual) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, opt *coreusage.PhysicalOptimizeOp) (Task, int64, error) { // If the required property is not empty and the row count > 1, // we cannot ensure this required property. // But if the row count is 0 or 1, we don't need to care about the property. 
@@ -153,32 +154,39 @@ func (p *LogicalTableDual) findBestTask(prop *property.PhysicalProperty, planCou }.Init(p.SCtx(), p.StatsInfo(), p.QueryBlockOffset()) dual.SetSchema(p.schema) planCounter.Dec(1) - opt.appendCandidate(p, dual, prop) - return &rootTask{p: dual, isEmpty: p.RowCount == 0}, 1, nil + appendCandidate4PhysicalOptimizeOp(opt, p, dual, prop) + rt := &RootTask{} + rt.SetPlan(dual) + rt.SetEmpty(p.RowCount == 0) + return rt, 1, nil } -func (p *LogicalShow) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, _ *physicalOptimizeOp) (task, int64, error) { +func (p *LogicalShow) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, _ *coreusage.PhysicalOptimizeOp) (Task, int64, error) { if !prop.IsSortItemEmpty() || planCounter.Empty() { return invalidTask, 0, nil } pShow := PhysicalShow{ShowContents: p.ShowContents, Extractor: p.Extractor}.Init(p.SCtx()) pShow.SetSchema(p.schema) planCounter.Dec(1) - return &rootTask{p: pShow}, 1, nil + rt := &RootTask{} + rt.SetPlan(pShow) + return rt, 1, nil } -func (p *LogicalShowDDLJobs) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, _ *physicalOptimizeOp) (task, int64, error) { +func (p *LogicalShowDDLJobs) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, _ *coreusage.PhysicalOptimizeOp) (Task, int64, error) { if !prop.IsSortItemEmpty() || planCounter.Empty() { return invalidTask, 0, nil } pShow := PhysicalShowDDLJobs{JobNumber: p.JobNumber}.Init(p.SCtx()) pShow.SetSchema(p.schema) planCounter.Dec(1) - return &rootTask{p: pShow}, 1, nil + rt := &RootTask{} + rt.SetPlan(pShow) + return rt, 1, nil } // rebuildChildTasks rebuilds the childTasks to make the clock_th combination. 
-func (p *baseLogicalPlan) rebuildChildTasks(childTasks *[]task, pp PhysicalPlan, childCnts []int64, planCounter int64, ts uint64, opt *physicalOptimizeOp) error { +func (p *baseLogicalPlan) rebuildChildTasks(childTasks *[]Task, pp PhysicalPlan, childCnts []int64, planCounter int64, ts uint64, opt *coreusage.PhysicalOptimizeOp) error { // The taskMap of children nodes should be rolled back first. for _, child := range p.children { child.rollBackTaskMap(ts) @@ -201,7 +209,7 @@ func (p *baseLogicalPlan) rebuildChildTasks(childTasks *[]task, pp PhysicalPlan, if curClock != 0 { return errors.Errorf("PlanCounterTp planCounter is not handled") } - if childTask != nil && childTask.invalid() { + if childTask != nil && childTask.Invalid() { return errors.Errorf("The current plan is invalid, please skip this plan") } *childTasks = append(*childTasks, childTask) @@ -214,12 +222,12 @@ func (p *baseLogicalPlan) enumeratePhysicalPlans4Task( prop *property.PhysicalProperty, addEnforcer bool, planCounter *PlanCounterTp, - opt *physicalOptimizeOp, -) (task, int64, error) { - var bestTask task = invalidTask + opt *coreusage.PhysicalOptimizeOp, +) (Task, int64, error) { + var bestTask Task = invalidTask var curCntPlan, cntPlan int64 var err error - childTasks := make([]task, 0, len(p.children)) + childTasks := make([]Task, 0, len(p.children)) childCnts := make([]int64, len(p.children)) cntPlan = 0 iteration := p.iteratePhysicalPlan @@ -252,14 +260,14 @@ func (p *baseLogicalPlan) enumeratePhysicalPlans4Task( } // Combine the best child tasks with parent physical plan. - curTask := pp.attach2Task(childTasks...) - if curTask.invalid() { + curTask := pp.Attach2Task(childTasks...) + if curTask.Invalid() { continue } // An optimal task could not satisfy the property, so it should be converted here. 
- if _, ok := curTask.(*rootTask); !ok && prop.TaskTp == property.RootTaskType { - curTask = curTask.convertToRootTask(p.SCtx()) + if _, ok := curTask.(*RootTask); !ok && prop.TaskTp == property.RootTaskType { + curTask = curTask.ConvertToRootTask(p.SCtx()) } // Enforce curTask property @@ -280,7 +288,7 @@ func (p *baseLogicalPlan) enumeratePhysicalPlans4Task( bestTask = curTask break } - opt.appendCandidate(p, curTask.plan(), prop) + appendCandidate4PhysicalOptimizeOp(opt, p, curTask.Plan(), prop) // Get the most efficient one. if curIsBetter, err := compareTaskCost(curTask, bestTask, opt); err != nil { return nil, 0, err @@ -294,11 +302,11 @@ func (p *baseLogicalPlan) enumeratePhysicalPlans4Task( // iteratePhysicalPlan is used to iterate the physical plan and get all child tasks. func (p *baseLogicalPlan) iteratePhysicalPlan( selfPhysicalPlan PhysicalPlan, - childTasks []task, + childTasks []Task, childCnts []int64, _ *property.PhysicalProperty, - opt *physicalOptimizeOp, -) ([]task, int64, []int64, error) { + opt *coreusage.PhysicalOptimizeOp, +) ([]Task, int64, []int64, error) { // Find best child tasks firstly. childTasks = childTasks[:0] // The curCntPlan records the number of possible plans for pp @@ -311,7 +319,7 @@ func (p *baseLogicalPlan) iteratePhysicalPlan( return nil, 0, childCnts, err } curCntPlan = curCntPlan * cnt - if childTask != nil && childTask.invalid() { + if childTask != nil && childTask.Invalid() { return nil, 0, childCnts, nil } childTasks = append(childTasks, childTask) @@ -327,11 +335,11 @@ func (p *baseLogicalPlan) iteratePhysicalPlan( // iterateChildPlan does the special part for sequence. 
We need to iterate its child one by one to check whether the former child is a valid plan and then go to the nex func (p *LogicalSequence) iterateChildPlan( selfPhysicalPlan PhysicalPlan, - childTasks []task, + childTasks []Task, childCnts []int64, prop *property.PhysicalProperty, - opt *physicalOptimizeOp, -) ([]task, int64, []int64, error) { + opt *coreusage.PhysicalOptimizeOp, +) ([]Task, int64, []int64, error) { // Find best child tasks firstly. childTasks = childTasks[:0] // The curCntPlan records the number of possible plans for pp @@ -346,7 +354,7 @@ func (p *LogicalSequence) iterateChildPlan( return nil, 0, nil, err } curCntPlan = curCntPlan * cnt - if childTask != nil && childTask.invalid() { + if childTask != nil && childTask.Invalid() { return nil, 0, nil, nil } _, isMpp := childTask.(*mppTask) @@ -370,7 +378,7 @@ func (p *LogicalSequence) iterateChildPlan( return nil, 0, nil, err } curCntPlan = curCntPlan * cnt - if lastChildTask != nil && lastChildTask.invalid() { + if lastChildTask != nil && lastChildTask.Invalid() { return nil, 0, nil, nil } @@ -383,7 +391,7 @@ func (p *LogicalSequence) iterateChildPlan( } // compareTaskCost compares cost of curTask and bestTask and returns whether curTask's cost is smaller than bestTask's. -func compareTaskCost(curTask, bestTask task, op *physicalOptimizeOp) (curIsBetter bool, err error) { +func compareTaskCost(curTask, bestTask Task, op *coreusage.PhysicalOptimizeOp) (curIsBetter bool, err error) { curCost, curInvalid, err := getTaskPlanCost(curTask, op) if err != nil { return false, err @@ -404,8 +412,8 @@ func compareTaskCost(curTask, bestTask task, op *physicalOptimizeOp) (curIsBette // getTaskPlanCost returns the cost of this task. // The new cost interface will be used if EnableNewCostInterface is true. // The second returned value indicates whether this task is valid. 
-func getTaskPlanCost(t task, op *physicalOptimizeOp) (float64, bool, error) { - if t.invalid() { +func getTaskPlanCost(t Task, pop *coreusage.PhysicalOptimizeOp) (float64, bool, error) { + if t.Invalid() { return math.MaxFloat64, true, nil } @@ -415,7 +423,7 @@ func getTaskPlanCost(t task, op *physicalOptimizeOp) (float64, bool, error) { indexPartialCost float64 ) switch t.(type) { - case *rootTask: + case *RootTask: taskType = property.RootTaskType case *copTask: // no need to know whether the task is single-read or double-read, so both CopSingleReadTaskType and CopDoubleReadTaskType are OK cop := t.(*copTask) @@ -423,15 +431,15 @@ func getTaskPlanCost(t task, op *physicalOptimizeOp) (float64, bool, error) { taskType = property.CopMultiReadTaskType // keep compatible with the old cost interface, for CopMultiReadTask, the cost is idxCost + tblCost. if !cop.indexPlanFinished { // only consider index cost in this case - idxCost, err := getPlanCost(cop.indexPlan, taskType, NewDefaultPlanCostOption().WithOptimizeTracer(op)) + idxCost, err := getPlanCost(cop.indexPlan, taskType, coreusage.NewDefaultPlanCostOption().WithOptimizeTracer(pop)) return idxCost, false, err } // consider both sides - idxCost, err := getPlanCost(cop.indexPlan, taskType, NewDefaultPlanCostOption().WithOptimizeTracer(op)) + idxCost, err := getPlanCost(cop.indexPlan, taskType, coreusage.NewDefaultPlanCostOption().WithOptimizeTracer(pop)) if err != nil { return 0, false, err } - tblCost, err := getPlanCost(cop.tablePlan, taskType, NewDefaultPlanCostOption().WithOptimizeTracer(op)) + tblCost, err := getPlanCost(cop.tablePlan, taskType, coreusage.NewDefaultPlanCostOption().WithOptimizeTracer(pop)) if err != nil { return 0, false, err } @@ -457,7 +465,7 @@ func getTaskPlanCost(t task, op *physicalOptimizeOp) (float64, bool, error) { // cost about table plan. 
if cop.indexPlanFinished && len(cop.idxMergePartPlans) != 0 { for _, partialScan := range cop.idxMergePartPlans { - partialCost, err := getPlanCost(partialScan, taskType, NewDefaultPlanCostOption().WithOptimizeTracer(op)) + partialCost, err := getPlanCost(partialScan, taskType, coreusage.NewDefaultPlanCostOption().WithOptimizeTracer(pop)) if err != nil { return 0, false, err } @@ -469,13 +477,13 @@ func getTaskPlanCost(t task, op *physicalOptimizeOp) (float64, bool, error) { default: return 0, false, errors.New("unknown task type") } - if t.plan() == nil { + if t.Plan() == nil { // It's a very special case for index merge case. // t.plan() == nil in index merge COP case, it means indexPlanFinished is false in other words. cost := 0.0 copTsk := t.(*copTask) for _, partialScan := range copTsk.idxMergePartPlans { - partialCost, err := getPlanCost(partialScan, taskType, NewDefaultPlanCostOption().WithOptimizeTracer(op)) + partialCost, err := getPlanCost(partialScan, taskType, coreusage.NewDefaultPlanCostOption().WithOptimizeTracer(pop)) if err != nil { return 0, false, err } @@ -483,33 +491,19 @@ func getTaskPlanCost(t task, op *physicalOptimizeOp) (float64, bool, error) { } return cost, false, nil } - cost, err := getPlanCost(t.plan(), taskType, NewDefaultPlanCostOption().WithOptimizeTracer(op)) + cost, err := getPlanCost(t.Plan(), taskType, coreusage.NewDefaultPlanCostOption().WithOptimizeTracer(pop)) return cost + indexPartialCost, false, err } -type physicalOptimizeOp struct { - // tracer is goring to track optimize steps during physical optimizing - tracer *tracing.PhysicalOptimizeTracer -} - -func defaultPhysicalOptimizeOption() *physicalOptimizeOp { - return &physicalOptimizeOp{} -} - -func (op *physicalOptimizeOp) withEnableOptimizeTracer(tracer *tracing.PhysicalOptimizeTracer) *physicalOptimizeOp { - op.tracer = tracer - return op -} - -func (op *physicalOptimizeOp) appendCandidate(lp LogicalPlan, pp PhysicalPlan, prop *property.PhysicalProperty) { - if op == 
nil || op.tracer == nil || pp == nil { +func appendCandidate4PhysicalOptimizeOp(pop *coreusage.PhysicalOptimizeOp, lp LogicalPlan, pp PhysicalPlan, prop *property.PhysicalProperty) { + if pop == nil || pop.GetTracer() == nil || pp == nil { return } candidate := &tracing.CandidatePlanTrace{ PlanTrace: &tracing.PlanTrace{TP: pp.TP(), ID: pp.ID(), ExplainInfo: pp.ExplainInfo(), ProperType: prop.String()}, MappingLogicalPlan: tracing.CodecPlanName(lp.TP(), lp.ID())} - op.tracer.AppendCandidate(candidate) + pop.GetTracer().AppendCandidate(candidate) // for PhysicalIndexMergeJoin/PhysicalIndexHashJoin/PhysicalIndexJoin, it will use innerTask as a child instead of calling findBestTask, // and innerTask.plan() will be appended to planTree in appendChildCandidate using empty MappingLogicalPlan field, so it won't mapping with the logic plan, @@ -520,13 +514,13 @@ func (op *physicalOptimizeOp) appendCandidate(lp LogicalPlan, pp PhysicalPlan, p switch join := pp.(type) { case *PhysicalIndexMergeJoin: index = join.InnerChildIdx - plan = join.innerTask.plan() + plan = join.innerTask.Plan() case *PhysicalIndexHashJoin: index = join.InnerChildIdx - plan = join.innerTask.plan() + plan = join.innerTask.Plan() case *PhysicalIndexJoin: index = join.InnerChildIdx - plan = join.innerTask.plan() + plan = join.innerTask.Plan() } if index != -1 { child := lp.(*baseLogicalPlan).children[index] @@ -534,20 +528,20 @@ func (op *physicalOptimizeOp) appendCandidate(lp LogicalPlan, pp PhysicalPlan, p PlanTrace: &tracing.PlanTrace{TP: plan.TP(), ID: plan.ID(), ExplainInfo: plan.ExplainInfo(), ProperType: prop.String()}, MappingLogicalPlan: tracing.CodecPlanName(child.TP(), child.ID())} - op.tracer.AppendCandidate(candidate) + pop.GetTracer().AppendCandidate(candidate) } - pp.appendChildCandidate(op) + pp.AppendChildCandidate(pop) } -func (op *physicalOptimizeOp) appendPlanCostDetail(detail *tracing.PhysicalPlanCostDetail) { - if op == nil || op.tracer == nil { +func 
appendPlanCostDetail4PhysicalOptimizeOp(pop *coreusage.PhysicalOptimizeOp, detail *tracing.PhysicalPlanCostDetail) { + if pop == nil || pop.GetTracer() == nil { return } - op.tracer.PhysicalPlanCostDetails[fmt.Sprintf("%v_%v", detail.GetPlanType(), detail.GetPlanID())] = detail + pop.GetTracer().PhysicalPlanCostDetails[fmt.Sprintf("%v_%v", detail.GetPlanType(), detail.GetPlanID())] = detail } // findBestTask implements LogicalPlan interface. -func (p *baseLogicalPlan) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, opt *physicalOptimizeOp) (bestTask task, cntPlan int64, err error) { +func (p *baseLogicalPlan) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, opt *coreusage.PhysicalOptimizeOp) (bestTask Task, cntPlan int64, err error) { // If p is an inner plan in an IndexJoin, the IndexJoin will generate an inner plan by itself, // and set inner child prop nil, so here we do nothing. if prop == nil { @@ -616,7 +610,7 @@ func (p *baseLogicalPlan) findBestTask(prop *property.PhysicalProperty, planCoun } var cnt int64 - var curTask task + var curTask Task if bestTask, cnt, err = p.enumeratePhysicalPlans4Task(plansFitsProp, newProp, false, planCounter, opt); err != nil { return nil, 0, err } @@ -634,7 +628,7 @@ func (p *baseLogicalPlan) findBestTask(prop *property.PhysicalProperty, planCoun bestTask = curTask goto END } - opt.appendCandidate(p, curTask.plan(), prop) + appendCandidate4PhysicalOptimizeOp(opt, p, curTask.Plan(), prop) if curIsBetter, err := compareTaskCost(curTask, bestTask, opt); err != nil { return nil, 0, err } else if curIsBetter { @@ -646,7 +640,7 @@ END: return bestTask, cntPlan, nil } -func (p *LogicalMemTable) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, opt *physicalOptimizeOp) (t task, cntPlan int64, err error) { +func (p *LogicalMemTable) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, opt *coreusage.PhysicalOptimizeOp) (t Task, cntPlan 
int64, err error) { if prop.MPPPartitionTp != property.AnyType { return invalidTask, 0, nil } @@ -693,12 +687,14 @@ func (p *LogicalMemTable) findBestTask(prop *property.PhysicalProperty, planCoun }.Init(p.SCtx(), p.StatsInfo(), p.QueryBlockOffset()) memTable.SetSchema(p.schema) planCounter.Dec(1) - opt.appendCandidate(p, memTable, prop) - return &rootTask{p: memTable}, 1, nil + appendCandidate4PhysicalOptimizeOp(opt, p, memTable, prop) + rt := &RootTask{} + rt.SetPlan(memTable) + return rt, 1, nil } // tryToGetDualTask will check if the push down predicate has false constant. If so, it will return table dual. -func (ds *DataSource) tryToGetDualTask() (task, error) { +func (ds *DataSource) tryToGetDualTask() (Task, error) { for _, cond := range ds.pushedDownConds { if con, ok := cond.(*expression.Constant); ok && con.DeferredExpr == nil && con.ParamMarker == nil { result, _, err := expression.EvalBool(ds.SCtx().GetExprCtx(), []expression.Expression{cond}, chunk.Row{}) @@ -708,9 +704,9 @@ func (ds *DataSource) tryToGetDualTask() (task, error) { if !result { dual := PhysicalTableDual{}.Init(ds.SCtx(), ds.StatsInfo(), ds.QueryBlockOffset()) dual.SetSchema(ds.schema) - return &rootTask{ - p: dual, - }, nil + rt := &RootTask{} + rt.SetPlan(dual) + return rt, nil } } } @@ -1277,7 +1273,7 @@ func (ds *DataSource) exploreEnforcedPlan() bool { // findBestTask implements the PhysicalPlan interface. // It will enumerate all the available indices and choose a plan with least cost. -func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, opt *physicalOptimizeOp) (t task, cntPlan int64, err error) { +func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, opt *coreusage.PhysicalOptimizeOp) (t Task, cntPlan int64, err error) { // If ds is an inner plan in an IndexJoin, the IndexJoin will generate an inner plan by itself, // and set inner child prop nil, so here we do nothing. 
if prop == nil { @@ -1315,7 +1311,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter return } var cnt int64 - var unenforcedTask task + var unenforcedTask Task // If prop.CanAddEnforcer is true, the prop.SortItems need to be set nil for ds.findBestTask. // Before function return, reset it for enforcing task prop and storing map. oldProp := prop.CloneEssentialFields() @@ -1326,7 +1322,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter if err != nil { return nil, 0, err } - if !unenforcedTask.invalid() && !ds.exploreEnforcedPlan() { + if !unenforcedTask.Invalid() && !ds.exploreEnforcedPlan() { ds.storeTask(prop, unenforcedTask) return unenforcedTask, cnt, nil } @@ -1349,7 +1345,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter prop.CanAddEnforcer = true } - if unenforcedTask != nil && !unenforcedTask.invalid() { + if unenforcedTask != nil && !unenforcedTask.Invalid() { curIsBest, cerr := compareTaskCost(unenforcedTask, t, opt) if cerr != nil { err = cerr @@ -1377,7 +1373,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter candidates := ds.skylinePruning(prop) pruningInfo := ds.getPruningInfo(candidates, prop) defer func() { - if err == nil && t != nil && !t.invalid() && pruningInfo != "" { + if err == nil && t != nil && !t.Invalid() && pruningInfo != "" { warnErr := errors.NewNoStackError(pruningInfo) if ds.SCtx().GetSessionVars().StmtCtx.InVerboseExplain { ds.SCtx().GetSessionVars().StmtCtx.AppendNote(warnErr) @@ -1395,7 +1391,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter if err != nil { return nil, 0, err } - if !idxMergeTask.invalid() { + if !idxMergeTask.Invalid() { cntPlan++ planCounter.Dec(1) } @@ -1423,9 +1419,8 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter dual.SetSchema(ds.schema) cntPlan++ planCounter.Dec(1) - t := &rootTask{ - p: dual, - } + t := 
&RootTask{} + t.SetPlan(dual) appendCandidate(ds, t, prop, opt) return t, cntPlan, nil } @@ -1499,7 +1494,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter } } if allRangeIsPoint { - var pointGetTask task + var pointGetTask Task if len(path.Ranges) == 1 { pointGetTask = ds.convertToPointGet(prop, candidate) } else { @@ -1508,12 +1503,12 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter // Batch/PointGet plans may be over-optimized, like `a>=1(?) and a<=1(?)` --> `a=1` --> PointGet(a=1). // For safety, prevent these plans from the plan cache here. - if !pointGetTask.invalid() && expression.MaybeOverOptimized4PlanCache(ds.SCtx().GetExprCtx(), candidate.path.AccessConds) && !isSafePointGetPath4PlanCache(ds.SCtx(), candidate.path) { + if !pointGetTask.Invalid() && expression.MaybeOverOptimized4PlanCache(ds.SCtx().GetExprCtx(), candidate.path.AccessConds) && !isSafePointGetPath4PlanCache(ds.SCtx(), candidate.path) { ds.SCtx().GetSessionVars().StmtCtx.SetSkipPlanCache(errors.NewNoStackError("Batch/PointGet plans may be over-optimized")) } appendCandidate(ds, pointGetTask, prop, opt) - if !pointGetTask.invalid() { + if !pointGetTask.Invalid() { cntPlan++ planCounter.Dec(1) } @@ -1537,7 +1532,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter if ds.preferStoreType&h.PreferTiKV != 0 && path.StoreType == kv.TiFlash { continue } - var tblTask task + var tblTask Task if ds.SampleInfo != nil { tblTask, err = ds.convertToSampleTable(prop, candidate, opt) } else { @@ -1546,7 +1541,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter if err != nil { return nil, 0, err } - if !tblTask.invalid() { + if !tblTask.Invalid() { cntPlan++ planCounter.Dec(1) } @@ -1571,7 +1566,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter if err != nil { return nil, 0, err } - if !idxTask.invalid() { + if !idxTask.Invalid() { cntPlan++ 
planCounter.Dec(1) } @@ -1592,7 +1587,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter } // convertToIndexMergeScan builds the index merge scan for intersection or union cases. -func (ds *DataSource) convertToIndexMergeScan(prop *property.PhysicalProperty, candidate *candidatePath, _ *physicalOptimizeOp) (task task, err error) { +func (ds *DataSource) convertToIndexMergeScan(prop *property.PhysicalProperty, candidate *candidatePath, _ *coreusage.PhysicalOptimizeOp) (task Task, err error) { if prop.IsFlashProp() || prop.TaskTp == property.CopSingleReadTaskType { return invalidTask, nil } @@ -1672,7 +1667,7 @@ func (ds *DataSource) convertToIndexMergeScan(prop *property.PhysicalProperty, c // task plan in function `getTaskPlanCost`. if prop.TaskTp == property.RootTaskType { cop.indexPlanFinished = true - task = cop.convertToRootTask(ds.SCtx()) + task = cop.ConvertToRootTask(ds.SCtx()) } else { _, pureTableScan := ts.(*PhysicalTableScan) if !pureTableScan { @@ -1995,7 +1990,7 @@ func (ts *PhysicalTableScan) appendExtraHandleCol(ds *DataSource) (*expression.C // convertToIndexScan converts the DataSource to index scan with idx. func (ds *DataSource) convertToIndexScan(prop *property.PhysicalProperty, - candidate *candidatePath, _ *physicalOptimizeOp) (task task, err error) { + candidate *candidatePath, _ *coreusage.PhysicalOptimizeOp) (task Task, err error) { if candidate.path.Index.MVIndex { // MVIndex is special since different index rows may return the same _row_id and this can break some assumptions of IndexReader. // Currently only support using IndexMerge to access MVIndex instead of IndexReader. 
@@ -2110,8 +2105,8 @@ func (ds *DataSource) convertToIndexScan(prop *property.PhysicalProperty, finalStats := ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt) is.addPushedDownSelection(cop, ds, path, finalStats) if prop.TaskTp == property.RootTaskType { - task = task.convertToRootTask(ds.SCtx()) - } else if _, ok := task.(*rootTask); ok { + task = task.ConvertToRootTask(ds.SCtx()) + } else if _, ok := task.(*RootTask); ok { return invalidTask, nil } return task, nil @@ -2243,7 +2238,7 @@ func (is *PhysicalIndexScan) addPushedDownSelection(copTask *copTask, p *DataSou logutil.BgLogger().Debug("calculate selectivity failed, use selection factor", zap.Error(err)) selectivity = SelectionFactor } - tableSel.SetStats(copTask.plan().StatsInfo().Scale(selectivity)) + tableSel.SetStats(copTask.Plan().StatsInfo().Scale(selectivity)) } tableSel.SetChildren(copTask.tablePlan) copTask.tablePlan = tableSel @@ -2386,7 +2381,7 @@ func (ds *DataSource) isPointGetPath(path *util.AccessPath) bool { } // convertToTableScan converts the DataSource to table scan. -func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candidate *candidatePath, _ *physicalOptimizeOp) (task task, err error) { +func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candidate *candidatePath, _ *coreusage.PhysicalOptimizeOp) (Task, error) { // It will be handled in convertToIndexScan. 
if prop.TaskTp == property.CopMultiReadTaskType { return invalidTask, nil @@ -2461,8 +2456,8 @@ func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candid ColumnNames: ds.names, } mppTask = ts.addPushedDownSelectionToMppTask(mppTask, ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt)) - task = mppTask - if !mppTask.invalid() { + var task Task = mppTask + if !mppTask.Invalid() { if prop.TaskTp == property.MppTaskType && len(mppTask.rootTaskConds) > 0 { // If got filters cannot be pushed down to tiflash, we have to make sure it will be executed in TiDB, // So have to return a rootTask, but prop requires mppTask, cannot meet this requirement. @@ -2474,7 +2469,7 @@ func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candid // which cannot pushdown to tiflash(because TiFlash doesn't support some expr in Proj) // So HashJoin cannot pushdown to tiflash. But we still want TableScan to run on tiflash. task = mppTask - task = task.convertToRootTask(ds.SCtx()) + task = task.ConvertToRootTask(ds.SCtx()) } } return task, nil @@ -2495,7 +2490,7 @@ func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candid ColumnNames: ds.names, } ts.PlanPartInfo = copTask.physPlanPartInfo - task = copTask + var task Task = copTask if candidate.isMatchProp { copTask.keepOrder = true if ds.tableInfo.GetPartitionInfo() != nil { @@ -2519,15 +2514,15 @@ func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candid return invalidTask, nil } if prop.TaskTp == property.RootTaskType { - task = task.convertToRootTask(ds.SCtx()) - } else if _, ok := task.(*rootTask); ok { + task = task.ConvertToRootTask(ds.SCtx()) + } else if _, ok := task.(*RootTask); ok { return invalidTask, nil } return task, nil } func (ds *DataSource) convertToSampleTable(prop *property.PhysicalProperty, - candidate *candidatePath, _ *physicalOptimizeOp) (task task, err error) { + candidate *candidatePath, _ *coreusage.PhysicalOptimizeOp) (Task, error) 
{ if prop.TaskTp == property.CopMultiReadTaskType { return invalidTask, nil } @@ -2544,12 +2539,12 @@ func (ds *DataSource) convertToSampleTable(prop *property.PhysicalProperty, Desc: candidate.isMatchProp && prop.SortItems[0].Desc, }.Init(ds.SCtx(), ds.QueryBlockOffset()) p.schema = ds.schema - return &rootTask{ - p: p, - }, nil + rt := &RootTask{} + rt.SetPlan(p) + return rt, nil } -func (ds *DataSource) convertToPointGet(prop *property.PhysicalProperty, candidate *candidatePath) (task task) { +func (ds *DataSource) convertToPointGet(prop *property.PhysicalProperty, candidate *candidatePath) Task { if !prop.IsSortItemEmpty() && !candidate.isMatchProp { return invalidTask } @@ -2577,7 +2572,8 @@ func (ds *DataSource) convertToPointGet(prop *property.PhysicalProperty, candida pointGetPlan.PartitionIdx = ds.partitionDefIdx } pointGetPlan.PartitionNames = ds.partitionNames - rTsk := &rootTask{p: pointGetPlan} + rTsk := &RootTask{} + rTsk.SetPlan(pointGetPlan) if candidate.path.IsIntHandlePath { pointGetPlan.Handle = kv.IntHandle(candidate.path.Ranges[0].LowVal[0].GetInt64()) pointGetPlan.UnsignedHandle = mysql.HasUnsignedFlag(ds.handleCols.GetCol(0).RetType.GetFlag()) @@ -2599,7 +2595,7 @@ func (ds *DataSource) convertToPointGet(prop *property.PhysicalProperty, candida Conditions: candidate.path.TableFilters, }.Init(ds.SCtx(), ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt), ds.QueryBlockOffset()) sel.SetChildren(pointGetPlan) - rTsk.p = sel + rTsk.SetPlan(sel) } } else { pointGetPlan.IndexInfo = candidate.path.Index @@ -2617,14 +2613,14 @@ func (ds *DataSource) convertToPointGet(prop *property.PhysicalProperty, candida Conditions: append(candidate.path.IndexFilters, candidate.path.TableFilters...), }.Init(ds.SCtx(), ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt), ds.QueryBlockOffset()) sel.SetChildren(pointGetPlan) - rTsk.p = sel + rTsk.SetPlan(sel) } } return rTsk } -func (ds *DataSource) convertToBatchPointGet(prop *property.PhysicalProperty, candidate 
*candidatePath) (task task) { +func (ds *DataSource) convertToBatchPointGet(prop *property.PhysicalProperty, candidate *candidatePath) Task { if !prop.IsSortItemEmpty() && !candidate.isMatchProp { return invalidTask } @@ -2650,7 +2646,7 @@ func (ds *DataSource) convertToBatchPointGet(prop *property.PhysicalProperty, ca if batchPointGetPlan.KeepOrder { batchPointGetPlan.Desc = prop.SortItems[0].Desc } - rTsk := &rootTask{} + rTsk := &RootTask{} if candidate.path.IsIntHandlePath { for _, ran := range candidate.path.Ranges { batchPointGetPlan.Handles = append(batchPointGetPlan.Handles, kv.IntHandle(ran.LowVal[0].GetInt64())) @@ -2664,7 +2660,7 @@ func (ds *DataSource) convertToBatchPointGet(prop *property.PhysicalProperty, ca Conditions: candidate.path.TableFilters, }.Init(ds.SCtx(), ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt), ds.QueryBlockOffset()) sel.SetChildren(batchPointGetPlan) - rTsk.p = sel + rTsk.SetPlan(sel) } } else { batchPointGetPlan.IndexInfo = candidate.path.Index @@ -2689,11 +2685,12 @@ func (ds *DataSource) convertToBatchPointGet(prop *property.PhysicalProperty, ca Conditions: append(candidate.path.IndexFilters, candidate.path.TableFilters...), }.Init(ds.SCtx(), ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt), ds.QueryBlockOffset()) sel.SetChildren(batchPointGetPlan) - rTsk.p = sel + rTsk.SetPlan(sel) } } - if rTsk.p == nil { - rTsk.p = batchPointGetPlan.Init(ds.SCtx(), ds.tableStats.ScaleByExpectCnt(accessCnt), ds.schema.Clone(), ds.names, ds.QueryBlockOffset()) + if rTsk.GetPlan() == nil { + tmpP := batchPointGetPlan.Init(ds.SCtx(), ds.tableStats.ScaleByExpectCnt(accessCnt), ds.schema.Clone(), ds.names, ds.QueryBlockOffset()) + rTsk.SetPlan(tmpP) } return rTsk @@ -2841,7 +2838,7 @@ func (ds *DataSource) getOriginalPhysicalIndexScan(prop *property.PhysicalProper return is } -func (p *LogicalCTE) findBestTask(prop *property.PhysicalProperty, counter *PlanCounterTp, pop *physicalOptimizeOp) (t task, cntPlan int64, err error) { +func (p 
*LogicalCTE) findBestTask(prop *property.PhysicalProperty, counter *PlanCounterTp, pop *coreusage.PhysicalOptimizeOp) (t Task, cntPlan int64, err error) { if len(p.children) > 0 { return p.baseLogicalPlan.findBestTask(prop, counter, pop) } @@ -2863,7 +2860,10 @@ func (p *LogicalCTE) findBestTask(prop *property.PhysicalProperty, counter *Plan tblColHists: p.StatsInfo().HistColl, } } else { - t = &rootTask{p: pcte, isEmpty: false} + rt := &RootTask{} + rt.SetPlan(pcte) + rt.SetEmpty(false) + t = rt } if prop.CanAddEnforcer { t = enforceProperty(prop, t, p.Plan.SCtx()) @@ -2871,22 +2871,24 @@ func (p *LogicalCTE) findBestTask(prop *property.PhysicalProperty, counter *Plan return t, 1, nil } -func (p *LogicalCTETable) findBestTask(prop *property.PhysicalProperty, _ *PlanCounterTp, _ *physicalOptimizeOp) (t task, cntPlan int64, err error) { +func (p *LogicalCTETable) findBestTask(prop *property.PhysicalProperty, _ *PlanCounterTp, _ *coreusage.PhysicalOptimizeOp) (t Task, cntPlan int64, err error) { if !prop.IsSortItemEmpty() { return nil, 1, nil } pcteTable := PhysicalCTETable{IDForStorage: p.idForStorage}.Init(p.SCtx(), p.StatsInfo()) pcteTable.SetSchema(p.schema) - t = &rootTask{p: pcteTable} + rt := &RootTask{} + rt.SetPlan(pcteTable) + t = rt return t, 1, nil } -func appendCandidate(lp LogicalPlan, task task, prop *property.PhysicalProperty, opt *physicalOptimizeOp) { - if task == nil || task.invalid() { +func appendCandidate(lp LogicalPlan, task Task, prop *property.PhysicalProperty, opt *coreusage.PhysicalOptimizeOp) { + if task == nil || task.Invalid() { return } - opt.appendCandidate(lp, task.plan(), prop) + appendCandidate4PhysicalOptimizeOp(opt, lp, task.Plan(), prop) } // PushDownNot here can convert condition 'not (a != 1)' to 'a = 1'. 
When we build range from conds, the condition like @@ -2898,12 +2900,12 @@ func pushDownNot(ctx expression.BuildContext, conds []expression.Expression) []e return conds } -func validateTableSamplePlan(ds *DataSource, t task, err error) error { +func validateTableSamplePlan(ds *DataSource, t Task, err error) error { if err != nil { return err } - if ds.SampleInfo != nil && !t.invalid() { - if _, ok := t.plan().(*PhysicalTableSample); !ok { + if ds.SampleInfo != nil && !t.Invalid() { + if _, ok := t.Plan().(*PhysicalTableSample); !ok { return expression.ErrInvalidTableSample.GenWithStackByArgs("plan not supported") } } diff --git a/pkg/planner/core/hint_utils.go b/pkg/planner/core/hint_utils.go index 1399a9420c886..6d8a17d9bd54e 100644 --- a/pkg/planner/core/hint_utils.go +++ b/pkg/planner/core/hint_utils.go @@ -38,17 +38,17 @@ func GenHintsFromFlatPlan(flat *FlatPhysicalPlan) []*ast.TableOptimizerHint { if len(selectPlan) == 0 || !selectPlan[0].IsPhysicalPlan { return nil } - for _, op := range selectPlan { - p := op.Origin.(PhysicalPlan) - hints = genHintsFromSingle(p, nodeTp, op.StoreType, hints) + for _, fop := range selectPlan { + p := fop.Origin.(PhysicalPlan) + hints = genHintsFromSingle(p, nodeTp, fop.StoreType, hints) } for _, cte := range flat.CTEs { - for i, op := range cte { - if i == 0 || !op.IsRoot { + for i, fop := range cte { + if i == 0 || !fop.IsRoot { continue } - p := op.Origin.(PhysicalPlan) - hints = genHintsFromSingle(p, nodeTp, op.StoreType, hints) + p := fop.Origin.(PhysicalPlan) + hints = genHintsFromSingle(p, nodeTp, fop.StoreType, hints) } } return h.RemoveDuplicatedHints(hints) diff --git a/pkg/planner/core/optimizer.go b/pkg/planner/core/optimizer.go index f776b34c110da..032413512cd3f 100644 --- a/pkg/planner/core/optimizer.go +++ b/pkg/planner/core/optimizer.go @@ -18,6 +18,7 @@ import ( "cmp" "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "math" "runtime" "slices" @@ -137,7 +138,7 @@ type logicalOptRule 
interface { The default value is false. It means that no interaction rule will be triggered. 3. error: If there is error during the rule optimizer, it will be thrown */ - optimize(context.Context, LogicalPlan, *plannerutil.LogicalOptimizeOp) (LogicalPlan, bool, error) + optimize(context.Context, LogicalPlan, *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) name() string } @@ -1045,7 +1046,7 @@ func setupFineGrainedShuffleInternal(ctx context.Context, sctx PlanContext, plan // It's for handling the inconsistency between row count in the statsInfo and the recorded actual row count. Please // see comments in PhysicalPlan for details. func propagateProbeParents(plan PhysicalPlan, probeParents []PhysicalPlan) { - plan.setProbeParents(probeParents) + plan.SetProbeParents(probeParents) switch x := plan.(type) { case *PhysicalApply, *PhysicalIndexJoin, *PhysicalIndexHashJoin, *PhysicalIndexMergeJoin: if join, ok := plan.(interface{ getInnerChildIdx() int }); ok { @@ -1122,7 +1123,7 @@ func logicalOptimize(ctx context.Context, flag uint64, logic LogicalPlan) (Logic debugtrace.EnterContextCommon(logic.SCtx()) defer debugtrace.LeaveContextCommon(logic.SCtx()) } - opt := plannerutil.DefaultLogicalOptimizeOption() + opt := coreusage.DefaultLogicalOptimizeOption() vars := logic.SCtx().GetSessionVars() if vars.StmtCtx.EnableOptimizeTrace { vars.StmtCtx.OptimizeTracer = &tracing.OptimizeTracer{} @@ -1190,14 +1191,14 @@ func physicalOptimize(logic LogicalPlan, planCounter *PlanCounterTp) (plan Physi ExpectedCnt: math.MaxFloat64, } - opt := defaultPhysicalOptimizeOption() + opt := coreusage.DefaultPhysicalOptimizeOption() stmtCtx := logic.SCtx().GetSessionVars().StmtCtx if stmtCtx.EnableOptimizeTrace { tracer := &tracing.PhysicalOptimizeTracer{ PhysicalPlanCostDetails: make(map[string]*tracing.PhysicalPlanCostDetail), Candidates: make(map[int]*tracing.CandidatePlanTrace), } - opt = opt.withEnableOptimizeTracer(tracer) + opt = opt.WithEnableOptimizeTracer(tracer) defer func() 
{ r := recover() if r != nil { @@ -1218,7 +1219,7 @@ func physicalOptimize(logic LogicalPlan, planCounter *PlanCounterTp) (plan Physi if *planCounter > 0 { logic.SCtx().GetSessionVars().StmtCtx.AppendWarning(errors.NewNoStackErrorf("The parameter of nth_plan() is out of range")) } - if t.invalid() { + if t.Invalid() { errMsg := "Can't find a proper physical plan for this query" if config.GetGlobalConfig().DisaggregatedTiFlash && !logic.SCtx().GetSessionVars().IsMPPAllowed() { errMsg += ": cop and batchCop are not allowed in disaggregated tiflash mode, you should turn on tidb_allow_mpp switch" @@ -1226,11 +1227,11 @@ func physicalOptimize(logic LogicalPlan, planCounter *PlanCounterTp) (plan Physi return nil, 0, plannererrors.ErrInternal.GenWithStackByArgs(errMsg) } - if err = t.plan().ResolveIndices(); err != nil { + if err = t.Plan().ResolveIndices(); err != nil { return nil, 0, err } - cost, err = getPlanCost(t.plan(), property.RootTaskType, NewDefaultPlanCostOption()) - return t.plan(), cost, err + cost, err = getPlanCost(t.Plan(), property.RootTaskType, coreusage.NewDefaultPlanCostOption()) + return t.Plan(), cost, err } // eliminateUnionScanAndLock set lock property for PointGet and BatchPointGet and eliminates UnionScan and Lock. diff --git a/pkg/planner/core/physical_plans.go b/pkg/planner/core/physical_plans.go index 2b085bf2a9870..3500b0c4b220c 100644 --- a/pkg/planner/core/physical_plans.go +++ b/pkg/planner/core/physical_plans.go @@ -30,6 +30,7 @@ import ( "github.com/pingcap/tidb/pkg/planner/cardinality" "github.com/pingcap/tidb/pkg/planner/property" "github.com/pingcap/tidb/pkg/planner/util" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/statistics" @@ -256,7 +257,7 @@ func (sg *TiKVSingleGather) GetPhysicalIndexReader(schema *expression.Schema, st return reader } -// Clone implements PhysicalPlan interface. 
+// Clone implements op.PhysicalPlan interface. func (p *PhysicalTableReader) Clone() (PhysicalPlan, error) { cloned := new(PhysicalTableReader) base, err := p.physicalSchemaProducer.cloneWithSelf(cloned) @@ -275,13 +276,13 @@ func (p *PhysicalTableReader) Clone() (PhysicalPlan, error) { return cloned, nil } -// SetChildren overrides PhysicalPlan SetChildren interface. +// SetChildren overrides op.PhysicalPlan SetChildren interface. func (p *PhysicalTableReader) SetChildren(children ...PhysicalPlan) { p.tablePlan = children[0] p.TablePlans = flattenPushDownPlan(p.tablePlan) } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (p *PhysicalTableReader) ExtractCorrelatedCols() (corCols []*expression.CorrelatedColumn) { for _, child := range p.TablePlans { corCols = append(corCols, ExtractCorrelatedCols4PhysicalPlan(child)...) @@ -289,7 +290,7 @@ func (p *PhysicalTableReader) ExtractCorrelatedCols() (corCols []*expression.Cor return corCols } -// BuildPlanTrace implements PhysicalPlan interface. +// BuildPlanTrace implements op.PhysicalPlan interface. func (p *PhysicalTableReader) BuildPlanTrace() *tracing.PlanTrace { rp := p.basePhysicalPlan.BuildPlanTrace() if p.tablePlan != nil { @@ -298,8 +299,8 @@ func (p *PhysicalTableReader) BuildPlanTrace() *tracing.PlanTrace { return rp } -func (p *PhysicalTableReader) appendChildCandidate(op *physicalOptimizeOp) { - p.basePhysicalPlan.appendChildCandidate(op) +func (p *PhysicalTableReader) AppendChildCandidate(op *coreusage.PhysicalOptimizeOp) { + p.basePhysicalPlan.AppendChildCandidate(op) appendChildCandidate(p, p.tablePlan, op) } @@ -318,7 +319,7 @@ type PhysicalIndexReader struct { PlanPartInfo PhysPlanPartInfo } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. 
func (p *PhysicalIndexReader) Clone() (PhysicalPlan, error) { cloned := new(PhysicalIndexReader) base, err := p.physicalSchemaProducer.cloneWithSelf(cloned) @@ -336,7 +337,7 @@ func (p *PhysicalIndexReader) Clone() (PhysicalPlan, error) { return cloned, err } -// SetSchema overrides PhysicalPlan SetSchema interface. +// SetSchema overrides op.PhysicalPlan SetSchema interface. func (p *PhysicalIndexReader) SetSchema(_ *expression.Schema) { if p.indexPlan != nil { p.IndexPlans = flattenPushDownPlan(p.indexPlan) @@ -351,13 +352,13 @@ func (p *PhysicalIndexReader) SetSchema(_ *expression.Schema) { } } -// SetChildren overrides PhysicalPlan SetChildren interface. +// SetChildren overrides op.PhysicalPlan SetChildren interface. func (p *PhysicalIndexReader) SetChildren(children ...PhysicalPlan) { p.indexPlan = children[0] p.SetSchema(nil) } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (p *PhysicalIndexReader) ExtractCorrelatedCols() (corCols []*expression.CorrelatedColumn) { for _, child := range p.IndexPlans { corCols = append(corCols, ExtractCorrelatedCols4PhysicalPlan(child)...) @@ -365,7 +366,7 @@ func (p *PhysicalIndexReader) ExtractCorrelatedCols() (corCols []*expression.Cor return corCols } -// BuildPlanTrace implements PhysicalPlan interface. +// BuildPlanTrace implements op.PhysicalPlan interface. 
func (p *PhysicalIndexReader) BuildPlanTrace() *tracing.PlanTrace { rp := p.basePhysicalPlan.BuildPlanTrace() if p.indexPlan != nil { @@ -374,8 +375,8 @@ func (p *PhysicalIndexReader) BuildPlanTrace() *tracing.PlanTrace { return rp } -func (p *PhysicalIndexReader) appendChildCandidate(op *physicalOptimizeOp) { - p.basePhysicalPlan.appendChildCandidate(op) +func (p *PhysicalIndexReader) AppendChildCandidate(op *coreusage.PhysicalOptimizeOp) { + p.basePhysicalPlan.AppendChildCandidate(op) if p.indexPlan != nil { appendChildCandidate(p, p.indexPlan, op) } @@ -457,7 +458,7 @@ type PhysicalIndexLookUpReader struct { keepOrder bool } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalIndexLookUpReader) Clone() (PhysicalPlan, error) { cloned := new(PhysicalIndexLookUpReader) base, err := p.physicalSchemaProducer.cloneWithSelf(cloned) @@ -492,7 +493,7 @@ func (p *PhysicalIndexLookUpReader) Clone() (PhysicalPlan, error) { return cloned, nil } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (p *PhysicalIndexLookUpReader) ExtractCorrelatedCols() (corCols []*expression.CorrelatedColumn) { for _, child := range p.TablePlans { corCols = append(corCols, ExtractCorrelatedCols4PhysicalPlan(child)...) @@ -513,7 +514,7 @@ func (p *PhysicalIndexLookUpReader) GetAvgTableRowSize() float64 { return cardinality.GetAvgRowSize(p.SCtx(), getTblStats(p.tablePlan), p.tablePlan.Schema().Columns, false, false) } -// BuildPlanTrace implements PhysicalPlan interface. +// BuildPlanTrace implements op.PhysicalPlan interface. 
func (p *PhysicalIndexLookUpReader) BuildPlanTrace() *tracing.PlanTrace { rp := p.basePhysicalPlan.BuildPlanTrace() if p.indexPlan != nil { @@ -525,8 +526,8 @@ func (p *PhysicalIndexLookUpReader) BuildPlanTrace() *tracing.PlanTrace { return rp } -func (p *PhysicalIndexLookUpReader) appendChildCandidate(op *physicalOptimizeOp) { - p.basePhysicalPlan.appendChildCandidate(op) +func (p *PhysicalIndexLookUpReader) appendChildCandidate(op *coreusage.PhysicalOptimizeOp) { + p.basePhysicalPlan.AppendChildCandidate(op) if p.indexPlan != nil { appendChildCandidate(p, p.indexPlan, op) } @@ -606,7 +607,7 @@ func (p *PhysicalIndexMergeReader) GetAvgTableRowSize() float64 { return cardinality.GetAvgRowSize(p.SCtx(), getTblStats(p.TablePlans[len(p.TablePlans)-1]), p.Schema().Columns, false, false) } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (p *PhysicalIndexMergeReader) ExtractCorrelatedCols() (corCols []*expression.CorrelatedColumn) { for _, child := range p.TablePlans { corCols = append(corCols, ExtractCorrelatedCols4PhysicalPlan(child)...) @@ -622,7 +623,7 @@ func (p *PhysicalIndexMergeReader) ExtractCorrelatedCols() (corCols []*expressio return corCols } -// BuildPlanTrace implements PhysicalPlan interface. +// BuildPlanTrace implements op.PhysicalPlan interface. 
func (p *PhysicalIndexMergeReader) BuildPlanTrace() *tracing.PlanTrace { rp := p.basePhysicalPlan.BuildPlanTrace() if p.tablePlan != nil { @@ -634,8 +635,8 @@ func (p *PhysicalIndexMergeReader) BuildPlanTrace() *tracing.PlanTrace { return rp } -func (p *PhysicalIndexMergeReader) appendChildCandidate(op *physicalOptimizeOp) { - p.basePhysicalPlan.appendChildCandidate(op) +func (p *PhysicalIndexMergeReader) appendChildCandidate(op *coreusage.PhysicalOptimizeOp) { + p.basePhysicalPlan.AppendChildCandidate(op) if p.tablePlan != nil { appendChildCandidate(p, p.tablePlan, op) } @@ -731,7 +732,7 @@ type PhysicalIndexScan struct { usedStatsInfo *stmtctx.UsedStatsInfoForTable } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalIndexScan) Clone() (PhysicalPlan, error) { cloned := new(PhysicalIndexScan) *cloned = *p @@ -759,7 +760,7 @@ func (p *PhysicalIndexScan) Clone() (PhysicalPlan, error) { return cloned, nil } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (p *PhysicalIndexScan) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := make([]*expression.CorrelatedColumn, 0, len(p.AccessCondition)) for _, expr := range p.AccessCondition { @@ -913,7 +914,7 @@ type PhysicalTableScan struct { maxWaitTimeMs int } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (ts *PhysicalTableScan) Clone() (PhysicalPlan, error) { clonedScan := new(PhysicalTableScan) *clonedScan = *ts @@ -940,7 +941,7 @@ func (ts *PhysicalTableScan) Clone() (PhysicalPlan, error) { return clonedScan, nil } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. 
func (ts *PhysicalTableScan) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := make([]*expression.CorrelatedColumn, 0, len(ts.AccessCondition)+len(ts.LateMaterializationFilterCondition)) for _, expr := range ts.AccessCondition { @@ -1077,7 +1078,7 @@ type PhysicalProjection struct { AvoidColumnEvaluator bool } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalProjection) Clone() (PhysicalPlan, error) { cloned := new(PhysicalProjection) *cloned = *p @@ -1090,7 +1091,7 @@ func (p *PhysicalProjection) Clone() (PhysicalPlan, error) { return cloned, err } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (p *PhysicalProjection) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := make([]*expression.CorrelatedColumn, 0, len(p.Exprs)) for _, expr := range p.Exprs { @@ -1127,7 +1128,7 @@ func (lt *PhysicalTopN) GetPartitionBy() []property.SortItem { return lt.PartitionBy } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (lt *PhysicalTopN) Clone() (PhysicalPlan, error) { cloned := new(PhysicalTopN) *cloned = *lt @@ -1147,7 +1148,7 @@ func (lt *PhysicalTopN) Clone() (PhysicalPlan, error) { return cloned, nil } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (lt *PhysicalTopN) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := make([]*expression.CorrelatedColumn, 0, len(lt.ByItems)) for _, item := range lt.ByItems { @@ -1181,7 +1182,7 @@ type PhysicalApply struct { OuterSchema []*expression.CorrelatedColumn } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. 
func (la *PhysicalApply) Clone() (PhysicalPlan, error) { cloned := new(PhysicalApply) base, err := la.PhysicalHashJoin.Clone() @@ -1198,7 +1199,7 @@ func (la *PhysicalApply) Clone() (PhysicalPlan, error) { return cloned, nil } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (la *PhysicalApply) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := la.PhysicalHashJoin.ExtractCorrelatedCols() for i := len(corCols) - 1; i >= 0; i-- { @@ -1276,7 +1277,7 @@ func (p *basePhysicalJoin) cloneWithSelf(newSelf PhysicalPlan) (*basePhysicalJoi return cloned, nil } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (p *basePhysicalJoin) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := make([]*expression.CorrelatedColumn, 0, len(p.LeftConditions)+len(p.RightConditions)+len(p.OtherConditions)) for _, fun := range p.LeftConditions { @@ -1358,7 +1359,7 @@ type PhysicalHashJoin struct { runtimeFilterList []*RuntimeFilter } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalHashJoin) Clone() (PhysicalPlan, error) { cloned := new(PhysicalHashJoin) base, err := p.basePhysicalJoin.cloneWithSelf(cloned) @@ -1381,7 +1382,7 @@ func (p *PhysicalHashJoin) Clone() (PhysicalPlan, error) { return cloned, nil } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. 
func (p *PhysicalHashJoin) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := make([]*expression.CorrelatedColumn, 0, len(p.EqualConditions)+len(p.NAEqualConditions)+len(p.LeftConditions)+len(p.RightConditions)+len(p.OtherConditions)) for _, fun := range p.EqualConditions { @@ -1459,7 +1460,7 @@ func NewPhysicalHashJoin(p *LogicalJoin, innerIdx int, useOuterToBuild bool, new type PhysicalIndexJoin struct { basePhysicalJoin - innerTask task + innerTask Task // Ranges stores the IndexRanges when the inner plan is index scan. Ranges ranger.MutableRanges @@ -1579,7 +1580,7 @@ type PhysicalExchangeReceiver struct { IsCTEReader bool } -// Clone implment PhysicalPlan interface. +// Clone implment op.PhysicalPlan interface. func (p *PhysicalExchangeReceiver) Clone() (PhysicalPlan, error) { np := new(PhysicalExchangeReceiver) base, err := p.basePhysicalPlan.cloneWithSelf(np) @@ -1637,7 +1638,7 @@ func (p PhysicalExpand) Init(ctx PlanContext, stats *property.StatsInfo, offset return &p } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalExpand) Clone() (PhysicalPlan, error) { if len(p.LevelExprs) > 0 { return p.cloneV2() @@ -1706,7 +1707,7 @@ type PhysicalExchangeSender struct { CompressionMode kv.ExchangeCompressionMode } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalExchangeSender) Clone() (PhysicalPlan, error) { np := new(PhysicalExchangeSender) base, err := p.basePhysicalPlan.cloneWithSelf(np) @@ -1734,7 +1735,7 @@ func (p *PhysicalExchangeSender) MemoryUsage() (sum int64) { return } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. 
func (p *PhysicalMergeJoin) Clone() (PhysicalPlan, error) { cloned := new(PhysicalMergeJoin) base, err := p.basePhysicalJoin.cloneWithSelf(cloned) @@ -1794,7 +1795,7 @@ func (p *PhysicalLimit) GetPartitionBy() []property.SortItem { return p.PartitionBy } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalLimit) Clone() (PhysicalPlan, error) { cloned := new(PhysicalLimit) *cloned = *p @@ -1827,7 +1828,7 @@ type PhysicalUnionAll struct { mpp bool } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalUnionAll) Clone() (PhysicalPlan, error) { cloned := new(PhysicalUnionAll) base, err := p.physicalSchemaProducer.cloneWithSelf(cloned) @@ -1927,7 +1928,7 @@ func (p *basePhysicalAgg) getAggFuncCostFactor(isMPP bool) (factor float64) { return } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (p *basePhysicalAgg) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := make([]*expression.CorrelatedColumn, 0, len(p.GroupByItems)+len(p.AggFuncs)) for _, expr := range p.GroupByItems { @@ -1970,7 +1971,7 @@ func (p *PhysicalHashAgg) getPointer() *basePhysicalAgg { return &p.basePhysicalAgg } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalHashAgg) Clone() (PhysicalPlan, error) { cloned := new(PhysicalHashAgg) base, err := p.basePhysicalAgg.cloneWithSelf(cloned) @@ -2018,7 +2019,7 @@ func (p *PhysicalStreamAgg) getPointer() *basePhysicalAgg { return &p.basePhysicalAgg } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. 
func (p *PhysicalStreamAgg) Clone() (PhysicalPlan, error) { cloned := new(PhysicalStreamAgg) base, err := p.basePhysicalAgg.cloneWithSelf(cloned) @@ -2048,7 +2049,7 @@ type PhysicalSort struct { IsPartialSort bool } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (ls *PhysicalSort) Clone() (PhysicalPlan, error) { cloned := new(PhysicalSort) cloned.IsPartialSort = ls.IsPartialSort @@ -2063,7 +2064,7 @@ func (ls *PhysicalSort) Clone() (PhysicalPlan, error) { return cloned, nil } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (ls *PhysicalSort) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := make([]*expression.CorrelatedColumn, 0, len(ls.ByItems)) for _, item := range ls.ByItems { @@ -2121,7 +2122,7 @@ type PhysicalUnionScan struct { HandleCols HandleCols } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (p *PhysicalUnionScan) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := make([]*expression.CorrelatedColumn, 0) for _, cond := range p.Conditions { @@ -2178,7 +2179,7 @@ type PhysicalSelection struct { // hasRFConditions bool } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalSelection) Clone() (PhysicalPlan, error) { cloned := new(PhysicalSelection) base, err := p.basePhysicalPlan.cloneWithSelf(cloned) @@ -2190,7 +2191,7 @@ func (p *PhysicalSelection) Clone() (PhysicalPlan, error) { return cloned, nil } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. 
func (p *PhysicalSelection) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := make([]*expression.CorrelatedColumn, 0, len(p.Conditions)) for _, cond := range p.Conditions { @@ -2217,7 +2218,7 @@ type PhysicalMaxOneRow struct { basePhysicalPlan } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalMaxOneRow) Clone() (PhysicalPlan, error) { cloned := new(PhysicalMaxOneRow) base, err := p.basePhysicalPlan.cloneWithSelf(cloned) @@ -2284,7 +2285,7 @@ type PhysicalWindow struct { storeTp kv.StoreType } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (p *PhysicalWindow) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := make([]*expression.CorrelatedColumn, 0, len(p.WindowFuncDescs)) for _, windowFunc := range p.WindowFuncDescs { @@ -2307,7 +2308,7 @@ func (p *PhysicalWindow) ExtractCorrelatedCols() []*expression.CorrelatedColumn return corCols } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalWindow) Clone() (PhysicalPlan, error) { cloned := new(PhysicalWindow) *cloned = *p @@ -2419,7 +2420,7 @@ type PhysicalShuffleReceiverStub struct { // Receiver points to `executor.shuffleReceiver`. Receiver unsafe.Pointer - // DataSource is the PhysicalPlan of the Receiver. + // DataSource is the op.PhysicalPlan of the Receiver. DataSource PhysicalPlan } @@ -2504,7 +2505,7 @@ func BuildMergeJoinPlan(ctx PlanContext, joinType JoinType, leftKeys, rightKeys return PhysicalMergeJoin{basePhysicalJoin: baseJoin}.Init(ctx, nil, 0) } -// SafeClone clones this PhysicalPlan and handles its panic. +// SafeClone clones this op.PhysicalPlan and handles its panic. 
func SafeClone(v PhysicalPlan) (_ PhysicalPlan, err error) { defer func() { if r := recover(); r != nil { @@ -2579,7 +2580,7 @@ type PhysicalCTETable struct { IDForStorage int } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (p *PhysicalCTE) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := ExtractCorrelatedCols4PhysicalPlan(p.SeedPlan) if p.RecurPlan != nil { @@ -2608,7 +2609,7 @@ func (p *PhysicalCTE) ExplainID() fmt.Stringer { }) } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalCTE) Clone() (PhysicalPlan, error) { cloned := new(PhysicalCTE) base, err := p.physicalSchemaProducer.cloneWithSelf(cloned) @@ -2751,7 +2752,7 @@ func (p *PhysicalCTEStorage) MemoryUsage() (sum int64) { return } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalCTEStorage) Clone() (PhysicalPlan, error) { cloned, err := (*PhysicalCTE)(p).Clone() if err != nil { @@ -2760,7 +2761,7 @@ func (p *PhysicalCTEStorage) Clone() (PhysicalPlan, error) { return (*PhysicalCTEStorage)(cloned.(*PhysicalCTE)), nil } -func appendChildCandidate(origin PhysicalPlan, pp PhysicalPlan, op *physicalOptimizeOp) { +func appendChildCandidate(origin PhysicalPlan, pp PhysicalPlan, op *coreusage.PhysicalOptimizeOp) { candidate := &tracing.CandidatePlanTrace{ PlanTrace: &tracing.PlanTrace{ ID: pp.ID(), @@ -2769,9 +2770,9 @@ func appendChildCandidate(origin PhysicalPlan, pp PhysicalPlan, op *physicalOpti // TODO: trace the cost }, } - op.tracer.AppendCandidate(candidate) - pp.appendChildCandidate(op) - op.tracer.Candidates[origin.ID()].AppendChildrenID(pp.ID()) + op.AppendCandidate(candidate) + pp.AppendChildCandidate(op) + op.GetTracer().Candidates[origin.ID()].AppendChildrenID(pp.ID()) } // PhysicalSequence is the physical representation of LogicalSequence. 
Used to mark the CTE producers in the plan tree. @@ -2806,7 +2807,7 @@ func (*PhysicalSequence) ExplainInfo() string { return res } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalSequence) Clone() (PhysicalPlan, error) { cloned := new(PhysicalSequence) base, err := p.physicalSchemaProducer.cloneWithSelf(cloned) diff --git a/pkg/planner/core/plan.go b/pkg/planner/core/plan.go index 8b1cff1bd7c75..b4587a2367423 100644 --- a/pkg/planner/core/plan.go +++ b/pkg/planner/core/plan.go @@ -15,7 +15,6 @@ package core import ( - "fmt" "math" "github.com/pingcap/errors" @@ -27,12 +26,12 @@ import ( fd "github.com/pingcap/tidb/pkg/planner/funcdep" "github.com/pingcap/tidb/pkg/planner/property" "github.com/pingcap/tidb/pkg/planner/util" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/execdetails" "github.com/pingcap/tidb/pkg/util/size" "github.com/pingcap/tidb/pkg/util/tracing" - "github.com/pingcap/tipb/go-tipb" ) // PlanContext is the context for building plan. @@ -47,52 +46,10 @@ func AsSctx(pctx PlanContext) (sessionctx.Context, error) { return sctx, nil } -// Plan is the description of an execution flow. -// It is created from ast.Node first, then optimized by the optimizer, -// finally used by the executor to create a Cursor which executes the statement. -type Plan interface { - // Get the schema. - Schema() *expression.Schema - - // Get the ID. - ID() int - - // TP get the plan type. - TP() string - - // Get the ID in explain statement - ExplainID() fmt.Stringer - - // ExplainInfo returns operator information to be explained. - ExplainInfo() string - - // ReplaceExprColumns replace all the column reference in the plan's expression node. - ReplaceExprColumns(replace map[string]*expression.Column) - - SCtx() PlanContext - - // StatsInfo will return the property.StatsInfo for this plan. 
- StatsInfo() *property.StatsInfo - - // OutputNames returns the outputting names of each column. - OutputNames() types.NameSlice - - // SetOutputNames sets the outputting name by the given slice. - SetOutputNames(names types.NameSlice) - - // QueryBlockOffset is query block offset. - // For example, in query - // `select /*+ use_index(@sel_2 t2, a) */ * from t1, (select a*2 as b from t2) tx where a>b` - // the hint should be applied on the sub-query, whose query block is 2. - QueryBlockOffset() int - - BuildPlanTrace() *tracing.PlanTrace -} - -func enforceProperty(p *property.PhysicalProperty, tsk task, ctx PlanContext) task { +func enforceProperty(p *property.PhysicalProperty, tsk Task, ctx PlanContext) Task { if p.TaskTp == property.MppTaskType { mpp, ok := tsk.(*mppTask) - if !ok || mpp.invalid() { + if !ok || mpp.Invalid() { return invalidTask } if !p.IsSortItemAllForPartition() { @@ -103,17 +60,17 @@ func enforceProperty(p *property.PhysicalProperty, tsk task, ctx PlanContext) ta } // when task is double cop task warping a index merge reader, tsk.plan() may be nil when indexPlanFinished is marked // as false, while the real plan is in idxMergePartPlans. tsk.plan()==nil is not right here. 
- if p.IsSortItemEmpty() || tsk.invalid() { + if p.IsSortItemEmpty() || tsk.Invalid() { return tsk } if p.TaskTp != property.MppTaskType { - tsk = tsk.convertToRootTask(ctx) + tsk = tsk.ConvertToRootTask(ctx) } sortReqProp := &property.PhysicalProperty{TaskTp: property.RootTaskType, SortItems: p.SortItems, ExpectedCnt: math.MaxFloat64} sort := PhysicalSort{ ByItems: make([]*util.ByItems, 0, len(p.SortItems)), IsPartialSort: p.IsSortItemAllForPartition(), - }.Init(ctx, tsk.plan().StatsInfo(), tsk.plan().QueryBlockOffset(), sortReqProp) + }.Init(ctx, tsk.Plan().StatsInfo(), tsk.Plan().QueryBlockOffset(), sortReqProp) for _, col := range p.SortItems { sort.ByItems = append(sort.ByItems, &util.ByItems{Expr: col.Col, Desc: col.Desc}) } @@ -121,23 +78,23 @@ func enforceProperty(p *property.PhysicalProperty, tsk task, ctx PlanContext) ta } // optimizeByShuffle insert `PhysicalShuffle` to optimize performance by running in a parallel manner. -func optimizeByShuffle(tsk task, ctx PlanContext) task { - if tsk.plan() == nil { +func optimizeByShuffle(tsk Task, ctx PlanContext) Task { + if tsk.Plan() == nil { return tsk } - switch p := tsk.plan().(type) { + switch p := tsk.Plan().(type) { case *PhysicalWindow: if shuffle := optimizeByShuffle4Window(p, ctx); shuffle != nil { - return shuffle.attach2Task(tsk) + return shuffle.Attach2Task(tsk) } case *PhysicalMergeJoin: if shuffle := optimizeByShuffle4MergeJoin(p, ctx); shuffle != nil { - return shuffle.attach2Task(tsk) + return shuffle.Attach2Task(tsk) } case *PhysicalStreamAgg: if shuffle := optimizeByShuffle4StreamAgg(p, ctx); shuffle != nil { - return shuffle.attach2Task(tsk) + return shuffle.Attach2Task(tsk) } } return tsk @@ -270,10 +227,10 @@ type LogicalPlan interface { // PredicatePushDown pushes down the predicates in the where/on/having clauses as deeply as possible. // It will accept a predicate that is an expression slice, and return the expressions that can't be pushed. 
// Because it might change the root if the having clause exists, we need to return a plan that represents a new root. - PredicatePushDown([]expression.Expression, *util.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) + PredicatePushDown([]expression.Expression, *coreusage.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) // PruneColumns prunes the unused columns, and return the new logical plan if changed, otherwise it's same. - PruneColumns([]*expression.Column, *util.LogicalOptimizeOp) (LogicalPlan, error) + PruneColumns([]*expression.Column, *coreusage.LogicalOptimizeOp) (LogicalPlan, error) // findBestTask converts the logical plan to the physical plan. It's a new interface. // It is called recursively from the parent to the children to create the result physical plan. @@ -283,7 +240,7 @@ type LogicalPlan interface { // If planCounter > 0, the clock_th plan generated in this function will be returned. // If planCounter = 0, the plan generated in this function will not be considered. // If planCounter = -1, then we will not force plan. - findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, op *physicalOptimizeOp) (task, int64, error) + findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, op *coreusage.PhysicalOptimizeOp) (Task, int64, error) // BuildKeyInfo will collect the information of unique keys into schema. // Because this method is also used in cascades planner, we cannot use @@ -292,16 +249,16 @@ type LogicalPlan interface { BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) // pushDownTopN will push down the topN or limit operator during logical optimization. - pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimizeOp) LogicalPlan + pushDownTopN(topN *LogicalTopN, opt *coreusage.LogicalOptimizeOp) LogicalPlan // deriveTopN derives an implicit TopN from a filter on row_number window function.. 
- deriveTopN(opt *util.LogicalOptimizeOp) LogicalPlan + deriveTopN(opt *coreusage.LogicalOptimizeOp) LogicalPlan // predicateSimplification consolidates different predcicates on a column and its equivalence classes. - predicateSimplification(opt *util.LogicalOptimizeOp) LogicalPlan + predicateSimplification(opt *coreusage.LogicalOptimizeOp) LogicalPlan // constantPropagation generate new constant predicate according to column equivalence relation - constantPropagation(parentPlan LogicalPlan, currentChildIdx int, opt *util.LogicalOptimizeOp) (newRoot LogicalPlan) + constantPropagation(parentPlan LogicalPlan, currentChildIdx int, opt *coreusage.LogicalOptimizeOp) (newRoot LogicalPlan) // pullUpConstantPredicates recursive find constant predicate, used for the constant propagation rule pullUpConstantPredicates() []expression.Expression @@ -325,7 +282,7 @@ type LogicalPlan interface { // valid, but the ordered indices in leaf plan is limited. So we can get all possible order properties by a pre-walking. PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column - // exhaustPhysicalPlans generates all possible plans that can match the required property. + // exhaustPhysicalPlans generates all possible plans that can match the required property. // It will return: // 1. All possible plans that can match the required property. // 2. Whether the SQL hint can work. Return true if there is no hint. @@ -356,120 +313,10 @@ type LogicalPlan interface { ExtractFD() *fd.FDSet } -// PhysicalPlan is a tree of the physical operators. -type PhysicalPlan interface { - Plan - - // getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost on model ver1. - getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) - - // getPlanCostVer2 calculates the cost of the plan if it has not been calculated yet and returns the cost on model ver2. 
- getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) - - // attach2Task makes the current physical plan as the father of task's physicalPlan and updates the cost of - // current task. If the child's task is cop task, some operator may close this task and return a new rootTask. - attach2Task(...task) task - - // ToPB converts physical plan to tipb executor. - ToPB(ctx PlanContext, storeType kv.StoreType) (*tipb.Executor, error) - - // GetChildReqProps gets the required property by child index. - GetChildReqProps(idx int) *property.PhysicalProperty - - // StatsCount returns the count of property.StatsInfo for this plan. - StatsCount() float64 - - // ExtractCorrelatedCols extracts correlated columns inside the PhysicalPlan. - ExtractCorrelatedCols() []*expression.CorrelatedColumn - - // Children get all the children. - Children() []PhysicalPlan - - // SetChildren sets the children for the plan. - SetChildren(...PhysicalPlan) - - // SetChild sets the ith child for the plan. - SetChild(i int, child PhysicalPlan) - - // ResolveIndices resolves the indices for columns. After doing this, the columns can evaluate the rows by their indices. - ResolveIndices() error - - // StatsInfo returns the StatsInfo of the plan. - StatsInfo() *property.StatsInfo - - // SetStats sets basePlan.stats inside the basePhysicalPlan. - SetStats(s *property.StatsInfo) - - // ExplainNormalizedInfo returns operator normalized information for generating digest. - ExplainNormalizedInfo() string - - // Clone clones this physical plan. 
- Clone() (PhysicalPlan, error) - - // appendChildCandidate append child physicalPlan into tracer in order to track each child physicalPlan which can't - // be tracked during findBestTask or enumeratePhysicalPlans4Task - appendChildCandidate(op *physicalOptimizeOp) - - // MemoryUsage return the memory usage of PhysicalPlan - MemoryUsage() int64 - - // Below three methods help to handle the inconsistency between row count in the StatsInfo and the recorded - // actual row count. - // For the children in the inner side (probe side) of Index Join and Apply, the row count in the StatsInfo - // means the estimated row count for a single "probe", but the recorded actual row count is the total row - // count for all "probes". - // To handle this inconsistency without breaking anything else, we added a field `probeParents` of - // type `[]PhysicalPlan` into all PhysicalPlan to record all operators that are (indirect or direct) parents - // of this PhysicalPlan and will cause this inconsistency. - // Using this information, we can convert the row count between the "single probe" row count and "all probes" - // row count freely. - - // setProbeParents sets the above stated `probeParents` field. - setProbeParents([]PhysicalPlan) - // getEstRowCountForDisplay uses the "single probe" row count in StatsInfo and the probeParents to calculate - // the "all probe" row count. - // All places that display the row count for a PhysicalPlan are expected to use this method. - getEstRowCountForDisplay() float64 - // getEstRowCountForDisplay uses the runtime stats and the probeParents to calculate the actual "probe" count. 
- getActualProbeCnt(*execdetails.RuntimeStatsColl) int64 -} - -// NewDefaultPlanCostOption returns PlanCostOption -func NewDefaultPlanCostOption() *PlanCostOption { - return &PlanCostOption{} -} - -// PlanCostOption indicates option during GetPlanCost -type PlanCostOption struct { - CostFlag uint64 - tracer *physicalOptimizeOp -} - -// WithCostFlag set costflag -func (op *PlanCostOption) WithCostFlag(flag uint64) *PlanCostOption { - if op == nil { - return nil - } - op.CostFlag = flag - return op -} - -// WithOptimizeTracer set tracer -func (op *PlanCostOption) WithOptimizeTracer(v *physicalOptimizeOp) *PlanCostOption { - if op == nil { - return nil - } - op.tracer = v - if v != nil && v.tracer != nil { - op.CostFlag |= CostFlagTrace - } - return op -} - type baseLogicalPlan struct { base.Plan - taskMap map[string]task + taskMap map[string]Task // taskMapBak forms a backlog stack of taskMap, used to roll back the taskMap. taskMapBak []string // taskMapBakTS stores the timestamps of logs. @@ -551,10 +398,10 @@ type basePhysicalPlan struct { // used by the new cost interface planCostInit bool planCost float64 - planCostVer2 costVer2 + planCostVer2 coreusage.CostVer2 // probeParents records the IndexJoins and Applys with this operator in their inner children. - // Please see comments in PhysicalPlan for details. + // Please see comments in op.PhysicalPlan for details. probeParents []PhysicalPlan // Only for MPP. If TiFlashFineGrainedShuffleStreamCount > 0: @@ -586,7 +433,7 @@ func (p *basePhysicalPlan) cloneWithSelf(newSelf PhysicalPlan) (*basePhysicalPla return base, nil } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *basePhysicalPlan) Clone() (PhysicalPlan, error) { return nil, errors.Errorf("%T doesn't support cloning", p.self) } @@ -596,7 +443,7 @@ func (*basePhysicalPlan) ExplainInfo() string { return "" } -// ExplainNormalizedInfo implements PhysicalPlan interface. 
+// ExplainNormalizedInfo implements op.PhysicalPlan interface. func (*basePhysicalPlan) ExplainNormalizedInfo() string { return "" } @@ -605,12 +452,12 @@ func (p *basePhysicalPlan) GetChildReqProps(idx int) *property.PhysicalProperty { return p.childrenReqProps[idx] } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (*basePhysicalPlan) ExtractCorrelatedCols() []*expression.CorrelatedColumn { return nil } -// MemoryUsage return the memory usage of basePhysicalPlan +// MemoryUsage returns the memory usage of basePhysicalPlan func (p *basePhysicalPlan) MemoryUsage() (sum int64) { if p == nil { return @@ -629,21 +476,21 @@ func (p *basePhysicalPlan) MemoryUsage() (sum int64) { return } -func (p *basePhysicalPlan) getEstRowCountForDisplay() float64 { +func (p *basePhysicalPlan) GetEstRowCountForDisplay() float64 { if p == nil { return 0 } return p.StatsInfo().RowCount * getEstimatedProbeCntFromProbeParents(p.probeParents) } -func (p *basePhysicalPlan) getActualProbeCnt(statsColl *execdetails.RuntimeStatsColl) int64 { +func (p *basePhysicalPlan) GetActualProbeCnt(statsColl *execdetails.RuntimeStatsColl) int64 { if p == nil { return 1 } return getActualProbeCntFromProbeParents(p.probeParents, statsColl) } -func (p *basePhysicalPlan) setProbeParents(probeParents []PhysicalPlan) { +func (p *basePhysicalPlan) SetProbeParents(probeParents []PhysicalPlan) { p.probeParents = probeParents } @@ -681,12 +528,12 @@ func (p *baseLogicalPlan) rollBackTaskMap(ts uint64) { } } -func (p *baseLogicalPlan) getTask(prop *property.PhysicalProperty) task { +func (p *baseLogicalPlan) getTask(prop *property.PhysicalProperty) Task { key := prop.HashCode() return p.taskMap[string(key)] } -func (p *baseLogicalPlan) storeTask(prop *property.PhysicalProperty, task task) { +func (p *baseLogicalPlan) storeTask(prop *property.PhysicalProperty, task Task) { key := prop.HashCode() if 
p.SCtx().GetSessionVars().StmtCtx.StmtHints.TaskMapNeedBackUp() { // Empty string for useless change. @@ -755,7 +602,7 @@ func (p *logicalSchemaProducer) BuildKeyInfo(selfSchema *expression.Schema, chil func newBaseLogicalPlan(ctx PlanContext, tp string, self LogicalPlan, qbOffset int) baseLogicalPlan { return baseLogicalPlan{ - taskMap: make(map[string]task), + taskMap: make(map[string]Task), taskMapBak: make([]string, 0, 10), taskMapBakTS: make([]uint64, 0, 10), Plan: base.NewBasePlan(ctx, tp, qbOffset), @@ -775,7 +622,7 @@ func (*baseLogicalPlan) ExtractCorrelatedCols() []*expression.CorrelatedColumn { } // PruneColumns implements LogicalPlan interface. -func (p *baseLogicalPlan) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *baseLogicalPlan) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { if len(p.children) == 0 { return p.self, nil } @@ -810,7 +657,7 @@ func (p *baseLogicalPlan) Children() []LogicalPlan { return p.children } -// Children implements PhysicalPlan Children interface. +// Children implements op.PhysicalPlan Children interface. func (p *basePhysicalPlan) Children() []PhysicalPlan { return p.children } @@ -820,7 +667,7 @@ func (p *baseLogicalPlan) SetChildren(children ...LogicalPlan) { p.children = children } -// SetChildren implements PhysicalPlan SetChildren interface. +// SetChildren implements op.PhysicalPlan SetChildren interface. func (p *basePhysicalPlan) SetChildren(children ...PhysicalPlan) { p.children = children } @@ -830,7 +677,7 @@ func (p *baseLogicalPlan) SetChild(i int, child LogicalPlan) { p.children[i] = child } -// SetChild implements PhysicalPlan SetChild interface. +// SetChild implements op.PhysicalPlan SetChild interface. 
func (p *basePhysicalPlan) SetChild(i int, child PhysicalPlan) { p.children[i] = child } @@ -860,7 +707,7 @@ func (p *baseLogicalPlan) BuildPlanTrace() *tracing.PlanTrace { return planTrace } -func (p *basePhysicalPlan) appendChildCandidate(op *physicalOptimizeOp) { +func (p *basePhysicalPlan) AppendChildCandidate(op *coreusage.PhysicalOptimizeOp) { if len(p.Children()) < 1 { return } @@ -870,9 +717,9 @@ func (p *basePhysicalPlan) appendChildCandidate(op *physicalOptimizeOp) { PlanTrace: &tracing.PlanTrace{TP: child.TP(), ID: child.ID(), ExplainInfo: child.ExplainInfo()}, } - op.tracer.AppendCandidate(childCandidate) - child.appendChildCandidate(op) + op.AppendCandidate(childCandidate) + child.AppendChildCandidate(op) childrenID = append(childrenID, child.ID()) } - op.tracer.Candidates[p.ID()].PlanTrace.AppendChildrenID(childrenID...) + op.GetTracer().Candidates[p.ID()].PlanTrace.AppendChildrenID(childrenID...) } diff --git a/pkg/planner/core/plan_base.go b/pkg/planner/core/plan_base.go new file mode 100644 index 0000000000000..0f109953480bb --- /dev/null +++ b/pkg/planner/core/plan_base.go @@ -0,0 +1,133 @@ +package core + +import ( + "fmt" + "github.com/pingcap/tidb/pkg/expression" + "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/planner/property" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" + "github.com/pingcap/tidb/pkg/types" + "github.com/pingcap/tidb/pkg/util/execdetails" + "github.com/pingcap/tidb/pkg/util/tracing" + "github.com/pingcap/tipb/go-tipb" +) + +// Plan is the description of an execution flow. +// It is created from ast.Node first, then optimized by the optimizer, +// finally used by the executor to create a Cursor which executes the statement. +type Plan interface { + // Get the schema. + Schema() *expression.Schema + + // Get the ID. + ID() int + + // TP get the plan type. + TP() string + + // Get the ID in explain statement + ExplainID() fmt.Stringer + + // ExplainInfo returns operator information to be explained. 
+ ExplainInfo() string + + // ReplaceExprColumns replace all the column reference in the plan's expression node. + ReplaceExprColumns(replace map[string]*expression.Column) + + SCtx() PlanContext + + // StatsInfo will return the property.StatsInfo for this plan. + StatsInfo() *property.StatsInfo + + // OutputNames returns the outputting names of each column. + OutputNames() types.NameSlice + + // SetOutputNames sets the outputting name by the given slice. + SetOutputNames(names types.NameSlice) + + // QueryBlockOffset is query block offset. + // For example, in query + // `select /*+ use_index(@sel_2 t2, a) */ * from t1, (select a*2 as b from t2) tx where a>b` + // the hint should be applied on the sub-query, whose query block is 2. + QueryBlockOffset() int + + BuildPlanTrace() *tracing.PlanTrace +} + +// PhysicalPlan is a tree of the physical operators. +type PhysicalPlan interface { + Plan + + // GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost on model ver1. + GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) + + // GetPlanCostVer2 calculates the cost of the plan if it has not been calculated yet and returns the cost on model ver2. + GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) + + // Attach2Task makes the current physical plan as the father of task's physicalPlan and updates the cost of + // current task. If the child's task is cop task, some operator may close this task and return a new rootTask. + Attach2Task(...Task) Task + + // ToPB converts physical plan to tipb executor. + ToPB(ctx PlanContext, storeType kv.StoreType) (*tipb.Executor, error) + + // GetChildReqProps gets the required property by child index. + GetChildReqProps(idx int) *property.PhysicalProperty + + // StatsCount returns the count of property.StatsInfo for this plan. 
+ StatsCount() float64 + + // ExtractCorrelatedCols extracts correlated columns inside the PhysicalPlan. + ExtractCorrelatedCols() []*expression.CorrelatedColumn + + // Children get all the children. + Children() []PhysicalPlan + + // SetChildren sets the children for the plan. + SetChildren(...PhysicalPlan) + + // SetChild sets the ith child for the plan. + SetChild(i int, child PhysicalPlan) + + // ResolveIndices resolves the indices for columns. After doing this, the columns can evaluate the rows by their indices. + ResolveIndices() error + + // StatsInfo returns the StatsInfo of the plan. + StatsInfo() *property.StatsInfo + + // SetStats sets basePlan.stats inside the basePhysicalPlan. + SetStats(s *property.StatsInfo) + + // ExplainNormalizedInfo returns operator normalized information for generating digest. + ExplainNormalizedInfo() string + + // Clone clones this physical plan. + Clone() (PhysicalPlan, error) + + // AppendChildCandidate append child physicalPlan into tracer in order to track each child physicalPlan which can't + // be tracked during findBestTask or enumeratePhysicalPlans4Task + AppendChildCandidate(op *coreusage.PhysicalOptimizeOp) + + // MemoryUsage return the memory usage of PhysicalPlan + MemoryUsage() int64 + + // Below three methods help to handle the inconsistency between row count in the StatsInfo and the recorded + // actual row count. + // For the children in the inner side (probe side) of Index Join and Apply, the row count in the StatsInfo + // means the estimated row count for a single "probe", but the recorded actual row count is the total row + // count for all "probes". + // To handle this inconsistency without breaking anything else, we added a field `probeParents` of + // type `[]PhysicalPlan` into all PhysicalPlan to record all operators that are (indirect or direct) parents + // of this PhysicalPlan and will cause this inconsistency. 
+ // Using this information, we can convert the row count between the "single probe" row count and "all probes" + // row count freely. + + // SetProbeParents sets the above stated `probeParents` field. + SetProbeParents([]PhysicalPlan) + // GetEstRowCountForDisplay uses the "single probe" row count in StatsInfo and the probeParents to calculate + // the "all probe" row count. + // All places that display the row count for a PhysicalPlan are expected to use this method. + GetEstRowCountForDisplay() float64 + // GetActualProbeCnt uses the runtime stats and the probeParents to calculate the actual "probe" count. + GetActualProbeCnt(*execdetails.RuntimeStatsColl) int64 +} diff --git a/pkg/planner/core/plan_cache.go b/pkg/planner/core/plan_cache.go index d6bba78c568d1..d5c9e21076c85 100644 --- a/pkg/planner/core/plan_cache.go +++ b/pkg/planner/core/plan_cache.go @@ -16,7 +16,6 @@ package core import ( "context" - "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/bindinfo" "github.com/pingcap/tidb/pkg/domain" diff --git a/pkg/planner/core/plan_cost_detail.go b/pkg/planner/core/plan_cost_detail.go index 8ba5c98ac0b61..5d17b614b6610 100644 --- a/pkg/planner/core/plan_cost_detail.go +++ b/pkg/planner/core/plan_cost_detail.go @@ -16,6 +16,7 @@ package core import ( "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/util/tracing" @@ -87,7 +88,7 @@ const ( MemQuotaLbl = "memQuota" ) -func setPointGetPlanCostDetail(p *PointGetPlan, opt *physicalOptimizeOp, +func setPointGetPlanCostDetail(p *PointGetPlan, opt *coreusage.PhysicalOptimizeOp, rowSize, networkFactor, seekFactor float64) { if opt == nil { return @@ -97,10 +98,10 @@ func setPointGetPlanCostDetail(p *PointGetPlan, opt *physicalOptimizeOp, AddParam(NetworkFactorLbl, networkFactor). AddParam(SeekFactorLbl, seekFactor). 
SetDesc(fmt.Sprintf("%s*%s+%s", RowSizeLbl, NetworkFactorLbl, SeekFactorLbl)) - opt.appendPlanCostDetail(detail) + appendPlanCostDetail4PhysicalOptimizeOp(opt, detail) } -func setBatchPointGetPlanCostDetail(p *BatchPointGetPlan, opt *physicalOptimizeOp, +func setBatchPointGetPlanCostDetail(p *BatchPointGetPlan, opt *coreusage.PhysicalOptimizeOp, rowCount, rowSize, networkFactor, seekFactor float64, scanConcurrency int) { if opt == nil { return @@ -113,10 +114,10 @@ func setBatchPointGetPlanCostDetail(p *BatchPointGetPlan, opt *physicalOptimizeO AddParam(ScanConcurrencyLbl, scanConcurrency). SetDesc(fmt.Sprintf("(%s*%s*%s+%s*%s)/%s", RowCountLbl, RowSizeLbl, NetworkFactorLbl, RowCountLbl, SeekFactorLbl, ScanConcurrencyLbl)) - opt.appendPlanCostDetail(detail) + appendPlanCostDetail4PhysicalOptimizeOp(opt, detail) } -func setPhysicalTableOrIndexScanCostDetail(p PhysicalPlan, opt *physicalOptimizeOp, +func setPhysicalTableOrIndexScanCostDetail(p PhysicalPlan, opt *coreusage.PhysicalOptimizeOp, rowCount, rowSize, scanFactor float64, costModelVersion int) { if opt == nil { return @@ -137,10 +138,10 @@ func setPhysicalTableOrIndexScanCostDetail(p PhysicalPlan, opt *physicalOptimize desc = fmt.Sprintf("%s*log2(%s)*%s", RowCountLbl, RowSizeLbl, ScanFactorLbl) } detail.SetDesc(desc) - opt.appendPlanCostDetail(detail) + appendPlanCostDetail4PhysicalOptimizeOp(opt, detail) } -func setPhysicalTableReaderCostDetail(p *PhysicalTableReader, opt *physicalOptimizeOp, +func setPhysicalTableReaderCostDetail(p *PhysicalTableReader, opt *coreusage.PhysicalOptimizeOp, rowCount, rowSize, networkFactor, netSeekCost, tablePlanCost float64, scanConcurrency int, storeType kv.StoreType) { // tracer haven't support non tikv plan for now @@ -156,10 +157,10 @@ func setPhysicalTableReaderCostDetail(p *PhysicalTableReader, opt *physicalOptim AddParam(ScanConcurrencyLbl, scanConcurrency) detail.SetDesc(fmt.Sprintf("(%s+%s*%s*%s+%s)/%s", TablePlanCostLbl, RowCountLbl, RowSizeLbl, NetworkFactorLbl, 
NetSeekCostLbl, ScanConcurrencyLbl)) - opt.appendPlanCostDetail(detail) + appendPlanCostDetail4PhysicalOptimizeOp(opt, detail) } -func setPhysicalIndexReaderCostDetail(p *PhysicalIndexReader, opt *physicalOptimizeOp, +func setPhysicalIndexReaderCostDetail(p *PhysicalIndexReader, opt *coreusage.PhysicalOptimizeOp, rowCount, rowSize, networkFactor, netSeekCost, indexPlanCost float64, scanConcurrency int) { if opt == nil { @@ -174,10 +175,10 @@ func setPhysicalIndexReaderCostDetail(p *PhysicalIndexReader, opt *physicalOptim AddParam(ScanConcurrencyLbl, scanConcurrency) detail.SetDesc(fmt.Sprintf("(%s+%s*%s*%s+%s)/%s", IndexPlanCostLbl, RowCountLbl, RowSizeLbl, NetworkFactorLbl, NetSeekCostLbl, ScanConcurrencyLbl)) - opt.appendPlanCostDetail(detail) + appendPlanCostDetail4PhysicalOptimizeOp(opt, detail) } -func setPhysicalHashJoinCostDetail(p *PhysicalHashJoin, opt *physicalOptimizeOp, spill bool, +func setPhysicalHashJoinCostDetail(p *PhysicalHashJoin, opt *coreusage.PhysicalOptimizeOp, spill bool, buildCnt, probeCnt, cpuFactor, rowSize, numPairs, cpuCost, probeCPUCost, memCost, diskCost, probeDiskCost, diskFactor, memoryFactor, concurrencyFactor float64, @@ -238,7 +239,7 @@ func setPhysicalHashJoinCostDetail(p *PhysicalHashJoin, opt *physicalOptimizeOp, AddParam(ProbeDiskCostDescLbl, diskCostDetail.probeDesc()) detail.SetDesc(fmt.Sprintf("%s+%s+%s+all children cost", CPUCostDetailLbl, MemCostDetailLbl, DiskCostDetailLbl)) - opt.appendPlanCostDetail(detail) + appendPlanCostDetail4PhysicalOptimizeOp(opt, detail) } // HashJoinProbeCostDetail indicates probe cpu cost detail diff --git a/pkg/planner/core/plan_cost_ver1.go b/pkg/planner/core/plan_cost_ver1.go index a0c7fdf80da26..b49a07854616b 100644 --- a/pkg/planner/core/plan_cost_ver1.go +++ b/pkg/planner/core/plan_cost_ver1.go @@ -15,6 +15,7 @@ package core import ( + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "math" "github.com/pingcap/errors" @@ -27,17 +28,6 @@ import ( 
"github.com/pingcap/tidb/pkg/util/paging" ) -const ( - // CostFlagRecalculate indicates the optimizer to ignore cached cost and recalculate it again. - CostFlagRecalculate uint64 = 1 << iota - - // CostFlagUseTrueCardinality indicates the optimizer to use true cardinality to calculate the cost. - CostFlagUseTrueCardinality - - // CostFlagTrace indicates whether to trace the cost calculation. - CostFlagTrace -) - const ( modelVer1 = 1 modelVer2 = 2 @@ -47,16 +37,16 @@ func hasCostFlag(costFlag, flag uint64) bool { return (costFlag & flag) > 0 } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *basePhysicalPlan) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. +func (p *basePhysicalPlan) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { // just calculate the cost once and always reuse it return p.planCost, nil } p.planCost = 0 // the default implementation, the operator have no cost for _, child := range p.children { - childCost, err := child.getPlanCostVer1(taskType, option) + childCost, err := child.GetPlanCostVer1(taskType, option) if err != nil { return 0, err } @@ -66,10 +56,10 @@ func (p *basePhysicalPlan) getPlanCostVer1(taskType property.TaskType, option *P return p.planCost, nil } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalSelection) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalSelection) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } @@ -88,7 +78,7 @@ func (p *PhysicalSelection) getPlanCostVer1(taskType property.TaskType, option * selfCost = 0 // for compatibility, see https://github.com/pingcap/tidb/issues/36243 } - childCost, err := p.children[0].getPlanCostVer1(taskType, option) + childCost, err := p.children[0].GetPlanCostVer1(taskType, option) if err != nil { return 0, err } @@ -110,13 +100,13 @@ func (p *PhysicalProjection) GetCost(count float64) float64 { return cpuCost + concurrencyCost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalProjection) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. +func (p *PhysicalProjection) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } - childCost, err := p.children[0].getPlanCostVer1(taskType, option) + childCost, err := p.children[0].GetPlanCostVer1(taskType, option) if err != nil { return 0, err } @@ -172,17 +162,17 @@ func (p *PhysicalIndexLookUpReader) GetCost(costFlag uint64) (cost float64) { return } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
-func (p *PhysicalIndexLookUpReader) getPlanCostVer1(_ property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. +func (p *PhysicalIndexLookUpReader) GetPlanCostVer1(_ property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } p.planCost = 0 // child's cost for _, child := range []PhysicalPlan{p.indexPlan, p.tablePlan} { - childCost, err := child.getPlanCostVer1(property.CopMultiReadTaskType, option) + childCost, err := child.GetPlanCostVer1(property.CopMultiReadTaskType, option) if err != nil { return 0, err } @@ -196,7 +186,7 @@ func (p *PhysicalIndexLookUpReader) getPlanCostVer1(_ property.TaskType, option tmp = tmp.Children()[0] } ts := tmp.(*PhysicalTableScan) - tblCost, err := ts.getPlanCostVer1(property.CopMultiReadTaskType, option) + tblCost, err := ts.GetPlanCostVer1(property.CopMultiReadTaskType, option) if err != nil { return 0, err } @@ -227,17 +217,17 @@ func (p *PhysicalIndexLookUpReader) getPlanCostVer1(_ property.TaskType, option return p.planCost, nil } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalIndexReader) getPlanCostVer1(_ property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalIndexReader) GetPlanCostVer1(_ property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } var rowCount, rowSize, netFactor, indexPlanCost, netSeekCost float64 sqlScanConcurrency := p.SCtx().GetSessionVars().DistSQLScanConcurrency() // child's cost - childCost, err := p.indexPlan.getPlanCostVer1(property.CopSingleReadTaskType, option) + childCost, err := p.indexPlan.GetPlanCostVer1(property.CopSingleReadTaskType, option) if err != nil { return 0, err } @@ -255,8 +245,8 @@ func (p *PhysicalIndexReader) getPlanCostVer1(_ property.TaskType, option *PlanC // consider concurrency p.planCost /= float64(sqlScanConcurrency) - if option.tracer != nil { - setPhysicalIndexReaderCostDetail(p, option.tracer, rowCount, rowSize, netFactor, netSeekCost, indexPlanCost, sqlScanConcurrency) + if option.GetTracer() != nil { + setPhysicalIndexReaderCostDetail(p, option.GetTracer(), rowCount, rowSize, netFactor, netSeekCost, indexPlanCost, sqlScanConcurrency) } p.planCostInit = true return p.planCost, nil @@ -269,10 +259,10 @@ func (p *PhysicalIndexReader) GetNetDataSize() float64 { return p.indexPlan.StatsCount() * rowSize } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalTableReader) getPlanCostVer1(_ property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalTableReader) GetPlanCostVer1(_ property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } @@ -284,7 +274,7 @@ func (p *PhysicalTableReader) getPlanCostVer1(_ property.TaskType, option *PlanC switch storeType { case kv.TiKV: // child's cost - childCost, err := p.tablePlan.getPlanCostVer1(property.CopSingleReadTaskType, option) + childCost, err := p.tablePlan.GetPlanCostVer1(property.CopSingleReadTaskType, option) if err != nil { return 0, err } @@ -307,7 +297,7 @@ func (p *PhysicalTableReader) getPlanCostVer1(_ property.TaskType, option *PlanC concurrency = p.SCtx().GetSessionVars().CopTiFlashConcurrencyFactor rowSize = collectRowSizeFromMPPPlan(p.tablePlan) seekCost = accumulateNetSeekCost4MPP(p.tablePlan) - childCost, err := p.tablePlan.getPlanCostVer1(property.MppTaskType, option) + childCost, err := p.tablePlan.GetPlanCostVer1(property.MppTaskType, option) if err != nil { return 0, err } @@ -318,7 +308,7 @@ func (p *PhysicalTableReader) getPlanCostVer1(_ property.TaskType, option *PlanC rowSize = cardinality.GetAvgRowSize(p.SCtx(), getTblStats(p.tablePlan), p.tablePlan.Schema().Columns, false, false) seekCost = estimateNetSeekCost(p.tablePlan) tType := property.CopSingleReadTaskType - childCost, err := p.tablePlan.getPlanCostVer1(tType, option) + childCost, err := p.tablePlan.GetPlanCostVer1(tType, option) if err != nil { return 0, err } @@ -333,12 +323,12 @@ func (p *PhysicalTableReader) getPlanCostVer1(_ property.TaskType, option *PlanC p.planCost /= concurrency // consider tidb_enforce_mpp if isMPP && p.SCtx().GetSessionVars().IsMPPEnforced() && - !hasCostFlag(costFlag, CostFlagRecalculate) { // show the real cost in explain-statements + !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { // show the real cost in 
explain-statements p.planCost /= 1000000000 } } - if option.tracer != nil { - setPhysicalTableReaderCostDetail(p, option.tracer, + if option.GetTracer() != nil { + setPhysicalTableReaderCostDetail(p, option.GetTracer(), rowCount, rowSize, netFactor, netSeekCost, tableCost, sqlScanConcurrency, storeType) } @@ -352,16 +342,16 @@ func (p *PhysicalTableReader) GetNetDataSize() float64 { return p.tablePlan.StatsCount() * rowSize } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalIndexMergeReader) getPlanCostVer1(_ property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. +func (p *PhysicalIndexMergeReader) GetPlanCostVer1(_ property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } p.planCost = 0 if tblScan := p.tablePlan; tblScan != nil { - childCost, err := tblScan.getPlanCostVer1(property.CopSingleReadTaskType, option) + childCost, err := tblScan.GetPlanCostVer1(property.CopSingleReadTaskType, option) if err != nil { return 0, err } @@ -372,7 +362,7 @@ func (p *PhysicalIndexMergeReader) getPlanCostVer1(_ property.TaskType, option * p.planCost += getCardinality(tblScan, costFlag) * rowSize * netFactor // net I/O cost } for _, partialScan := range p.partialPlans { - childCost, err := partialScan.getPlanCostVer1(property.CopSingleReadTaskType, option) + childCost, err := partialScan.GetPlanCostVer1(property.CopSingleReadTaskType, option) if err != nil { return 0, err } @@ -421,10 +411,10 @@ func (p *PhysicalIndexMergeReader) GetPartialReaderNetDataSize(plan PhysicalPlan return plan.StatsCount() * cardinality.GetAvgRowSize(p.SCtx(), getTblStats(plan), plan.Schema().Columns, 
isIdxScan, false) } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalTableScan) getPlanCostVer1(_ property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. +func (p *PhysicalTableScan) GetPlanCostVer1(_ property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } @@ -438,18 +428,18 @@ func (p *PhysicalTableScan) getPlanCostVer1(_ property.TaskType, option *PlanCos rowCount = getCardinality(p, costFlag) rowSize = p.getScanRowSize() selfCost = rowCount * rowSize * scanFactor - if option.tracer != nil { - setPhysicalTableOrIndexScanCostDetail(p, option.tracer, rowCount, rowSize, scanFactor, costModelVersion) + if option.GetTracer() != nil { + setPhysicalTableOrIndexScanCostDetail(p, option.GetTracer(), rowCount, rowSize, scanFactor, costModelVersion) } p.planCost = selfCost p.planCostInit = true return p.planCost, nil } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalIndexScan) getPlanCostVer1(_ property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalIndexScan) GetPlanCostVer1(_ property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } @@ -463,8 +453,8 @@ func (p *PhysicalIndexScan) getPlanCostVer1(_ property.TaskType, option *PlanCos rowCount = getCardinality(p, costFlag) rowSize = p.getScanRowSize() selfCost = rowCount * rowSize * scanFactor - if option.tracer != nil { - setPhysicalTableOrIndexScanCostDetail(p, option.tracer, rowCount, rowSize, scanFactor, costModelVersion) + if option.GetTracer() != nil { + setPhysicalTableOrIndexScanCostDetail(p, option.GetTracer(), rowCount, rowSize, scanFactor, costModelVersion) } p.planCost = selfCost p.planCostInit = true @@ -509,7 +499,7 @@ func (p *PhysicalIndexJoin) GetCost(outerCnt, innerCnt, outerCost, innerCost flo numPairs = 0 } } - if hasCostFlag(costFlag, CostFlagUseTrueCardinality) { + if hasCostFlag(costFlag, coreusage.CostFlagUseTrueCardinality) { numPairs = getOperatorActRows(p) } probeCost := numPairs * sessVars.GetCPUFactor() @@ -531,24 +521,24 @@ func (p *PhysicalIndexJoin) GetCost(outerCnt, innerCnt, outerCost, innerCost flo return outerCost + innerPlanCost + cpuCost + memoryCost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalIndexJoin) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalIndexJoin) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } outerChild, innerChild := p.children[1-p.InnerChildIdx], p.children[p.InnerChildIdx] - outerCost, err := outerChild.getPlanCostVer1(taskType, option) + outerCost, err := outerChild.GetPlanCostVer1(taskType, option) if err != nil { return 0, err } - innerCost, err := innerChild.getPlanCostVer1(taskType, option) + innerCost, err := innerChild.GetPlanCostVer1(taskType, option) if err != nil { return 0, err } outerCnt := getCardinality(outerChild, costFlag) innerCnt := getCardinality(innerChild, costFlag) - if hasCostFlag(costFlag, CostFlagUseTrueCardinality) && outerCnt > 0 { + if hasCostFlag(costFlag, coreusage.CostFlagUseTrueCardinality) && outerCnt > 0 { innerCnt /= outerCnt // corresponding to one outer row when calculating IndexJoin costs innerCost /= outerCnt } @@ -596,7 +586,7 @@ func (p *PhysicalIndexHashJoin) GetCost(outerCnt, innerCnt, outerCost, innerCost numPairs = 0 } } - if hasCostFlag(costFlag, CostFlagUseTrueCardinality) { + if hasCostFlag(costFlag, coreusage.CostFlagUseTrueCardinality) { numPairs = getOperatorActRows(p) } // Inner workers do hash join in parallel, but they can only save ONE outer @@ -620,24 +610,24 @@ func (p *PhysicalIndexHashJoin) GetCost(outerCnt, innerCnt, outerCost, innerCost return outerCost + innerPlanCost + cpuCost + memoryCost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalIndexHashJoin) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalIndexHashJoin) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } outerChild, innerChild := p.children[1-p.InnerChildIdx], p.children[p.InnerChildIdx] - outerCost, err := outerChild.getPlanCostVer1(taskType, option) + outerCost, err := outerChild.GetPlanCostVer1(taskType, option) if err != nil { return 0, err } - innerCost, err := innerChild.getPlanCostVer1(taskType, option) + innerCost, err := innerChild.GetPlanCostVer1(taskType, option) if err != nil { return 0, err } outerCnt := getCardinality(outerChild, costFlag) innerCnt := getCardinality(innerChild, costFlag) - if hasCostFlag(costFlag, CostFlagUseTrueCardinality) && outerCnt > 0 { + if hasCostFlag(costFlag, coreusage.CostFlagUseTrueCardinality) && outerCnt > 0 { innerCnt /= outerCnt // corresponding to one outer row when calculating IndexJoin costs innerCost /= outerCnt } @@ -687,7 +677,7 @@ func (p *PhysicalIndexMergeJoin) GetCost(outerCnt, innerCnt, outerCost, innerCos numPairs = 0 } } - if hasCostFlag(costFlag, CostFlagUseTrueCardinality) { + if hasCostFlag(costFlag, coreusage.CostFlagUseTrueCardinality) { numPairs = getOperatorActRows(p) } avgProbeCnt := numPairs / outerCnt @@ -711,24 +701,24 @@ func (p *PhysicalIndexMergeJoin) GetCost(outerCnt, innerCnt, outerCost, innerCos return outerCost + innerPlanCost + cpuCost + memoryCost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalIndexMergeJoin) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalIndexMergeJoin) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } outerChild, innerChild := p.children[1-p.InnerChildIdx], p.children[p.InnerChildIdx] - outerCost, err := outerChild.getPlanCostVer1(taskType, option) + outerCost, err := outerChild.GetPlanCostVer1(taskType, option) if err != nil { return 0, err } - innerCost, err := innerChild.getPlanCostVer1(taskType, option) + innerCost, err := innerChild.GetPlanCostVer1(taskType, option) if err != nil { return 0, err } outerCnt := getCardinality(outerChild, costFlag) innerCnt := getCardinality(innerChild, costFlag) - if hasCostFlag(costFlag, CostFlagUseTrueCardinality) && outerCnt > 0 { + if hasCostFlag(costFlag, coreusage.CostFlagUseTrueCardinality) && outerCnt > 0 { innerCnt /= outerCnt // corresponding to one outer row when calculating IndexJoin costs innerCost /= outerCnt } @@ -764,18 +754,18 @@ func (p *PhysicalApply) GetCost(lCount, rCount, lCost, rCost float64) float64 { return cpuCost + lCost + lCount*rCost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalApply) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalApply) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } outerChild, innerChild := p.children[1-p.InnerChildIdx], p.children[p.InnerChildIdx] - outerCost, err := outerChild.getPlanCostVer1(taskType, option) + outerCost, err := outerChild.GetPlanCostVer1(taskType, option) if err != nil { return 0, err } - innerCost, err := innerChild.getPlanCostVer1(taskType, option) + innerCost, err := innerChild.GetPlanCostVer1(taskType, option) if err != nil { return 0, err } @@ -813,7 +803,7 @@ func (p *PhysicalMergeJoin) GetCost(lCnt, rCnt float64, costFlag uint64) float64 numPairs = 0 } } - if hasCostFlag(costFlag, CostFlagUseTrueCardinality) { + if hasCostFlag(costFlag, coreusage.CostFlagUseTrueCardinality) { numPairs = getOperatorActRows(p) } sessVars := p.SCtx().GetSessionVars() @@ -832,15 +822,15 @@ func (p *PhysicalMergeJoin) GetCost(lCnt, rCnt float64, costFlag uint64) float64 return cpuCost + memoryCost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalMergeJoin) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalMergeJoin) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } p.planCost = 0 for _, child := range p.children { - childCost, err := child.getPlanCostVer1(taskType, option) + childCost, err := child.GetPlanCostVer1(taskType, option) if err != nil { return 0, err } @@ -852,7 +842,7 @@ func (p *PhysicalMergeJoin) getPlanCostVer1(taskType property.TaskType, option * } // GetCost computes cost of hash join operator itself. -func (p *PhysicalHashJoin) GetCost(lCnt, rCnt float64, _ bool, costFlag uint64, op *physicalOptimizeOp) float64 { +func (p *PhysicalHashJoin) GetCost(lCnt, rCnt float64, _ bool, costFlag uint64, op *coreusage.PhysicalOptimizeOp) float64 { buildCnt, probeCnt := lCnt, rCnt build := p.children[0] // Taking the right as the inner for right join or using the outer to build a hash table. @@ -896,7 +886,7 @@ func (p *PhysicalHashJoin) GetCost(lCnt, rCnt float64, _ bool, costFlag uint64, numPairs = 0 } } - if hasCostFlag(costFlag, CostFlagUseTrueCardinality) { + if hasCostFlag(costFlag, coreusage.CostFlagUseTrueCardinality) { numPairs = getOperatorActRows(p) } // Cost of querying hash table is cheap actually, so we just compute the cost of @@ -939,22 +929,22 @@ func (p *PhysicalHashJoin) GetCost(lCnt, rCnt float64, _ bool, costFlag uint64, return cpuCost + memoryCost + diskCost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalHashJoin) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalHashJoin) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } p.planCost = 0 for _, child := range p.children { - childCost, err := child.getPlanCostVer1(taskType, option) + childCost, err := child.GetPlanCostVer1(taskType, option) if err != nil { return 0, err } p.planCost += childCost } p.planCost += p.GetCost(getCardinality(p.children[0], costFlag), getCardinality(p.children[1], costFlag), - taskType == property.MppTaskType, costFlag, option.tracer) + taskType == property.MppTaskType, costFlag, option.GetTracer()) p.planCostInit = true return p.planCost, nil } @@ -974,13 +964,13 @@ func (p *PhysicalStreamAgg) GetCost(inputRows float64, isRoot, _ bool, costFlag return cpuCost + memoryCost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalStreamAgg) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalStreamAgg) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } - childCost, err := p.children[0].getPlanCostVer1(taskType, option) + childCost, err := p.children[0].GetPlanCostVer1(taskType, option) if err != nil { return 0, err } @@ -1015,13 +1005,13 @@ func (p *PhysicalHashAgg) GetCost(inputRows float64, isRoot, isMPP bool, costFla return cpuCost + memoryCost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalHashAgg) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. +func (p *PhysicalHashAgg) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } - childCost, err := p.children[0].getPlanCostVer1(taskType, option) + childCost, err := p.children[0].GetPlanCostVer1(taskType, option) if err != nil { return 0, err } @@ -1063,13 +1053,13 @@ func (p *PhysicalSort) GetCost(count float64, schema *expression.Schema) float64 return cpuCost + memoryCost + diskCost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalSort) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalSort) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } - childCost, err := p.children[0].getPlanCostVer1(taskType, option) + childCost, err := p.children[0].GetPlanCostVer1(taskType, option) if err != nil { return 0, err } @@ -1102,13 +1092,13 @@ func (p *PhysicalTopN) GetCost(count float64, isRoot bool) float64 { return cpuCost + memoryCost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalTopN) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. +func (p *PhysicalTopN) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } - childCost, err := p.children[0].getPlanCostVer1(taskType, option) + childCost, err := p.children[0].GetPlanCostVer1(taskType, option) if err != nil { return 0, err } @@ -1119,7 +1109,7 @@ func (p *PhysicalTopN) getPlanCostVer1(taskType property.TaskType, option *PlanC } // GetCost returns cost of the PointGetPlan. 
-func (p *BatchPointGetPlan) GetCost(opt *physicalOptimizeOp) float64 { +func (p *BatchPointGetPlan) GetCost(opt *coreusage.PhysicalOptimizeOp) float64 { cols := p.accessCols if cols == nil { return 0 // the cost of BatchGet generated in fast plan optimization is always 0 @@ -1146,13 +1136,13 @@ func (p *BatchPointGetPlan) GetCost(opt *physicalOptimizeOp) float64 { return cost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *BatchPointGetPlan) getPlanCostVer1(_ property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. +func (p *BatchPointGetPlan) GetPlanCostVer1(_ property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } - p.planCost = p.GetCost(option.tracer) + p.planCost = p.GetCost(option.GetTracer()) p.planCostInit = true return p.planCost, nil } @@ -1170,7 +1160,7 @@ func (p *BatchPointGetPlan) GetAvgRowSize() float64 { } // GetCost returns cost of the PointGetPlan. -func (p *PointGetPlan) GetCost(opt *physicalOptimizeOp) float64 { +func (p *PointGetPlan) GetCost(opt *coreusage.PhysicalOptimizeOp) float64 { cols := p.accessCols if cols == nil { return 0 // the cost of PointGet generated in fast plan optimization is always 0 @@ -1194,13 +1184,13 @@ func (p *PointGetPlan) GetCost(opt *physicalOptimizeOp) float64 { return cost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PointGetPlan) getPlanCostVer1(_ property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PointGetPlan) GetPlanCostVer1(_ property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } - p.planCost = p.GetCost(option.tracer) + p.planCost = p.GetCost(option.GetTracer()) p.planCostInit = true return p.planCost, nil } @@ -1217,15 +1207,15 @@ func (p *PointGetPlan) GetAvgRowSize() float64 { return cardinality.GetIndexAvgRowSize(p.SCtx(), p.StatsInfo().HistColl, cols, p.IndexInfo.Unique) } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalUnionAll) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. +func (p *PhysicalUnionAll) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } var childMaxCost float64 for _, child := range p.children { - childCost, err := child.getPlanCostVer1(taskType, option) + childCost, err := child.GetPlanCostVer1(taskType, option) if err != nil { return 0, err } @@ -1236,13 +1226,13 @@ func (p *PhysicalUnionAll) getPlanCostVer1(taskType property.TaskType, option *P return p.planCost, nil } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalExchangeReceiver) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalExchangeReceiver) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } - childCost, err := p.children[0].getPlanCostVer1(taskType, option) + childCost, err := p.children[0].GetPlanCostVer1(taskType, option) if err != nil { return 0, err } @@ -1271,8 +1261,8 @@ func getOperatorActRows(operator PhysicalPlan) float64 { } func getCardinality(operator PhysicalPlan, costFlag uint64) float64 { - if hasCostFlag(costFlag, CostFlagUseTrueCardinality) { - actualProbeCnt := operator.getActualProbeCnt(operator.SCtx().GetSessionVars().StmtCtx.RuntimeStatsColl) + if hasCostFlag(costFlag, coreusage.CostFlagUseTrueCardinality) { + actualProbeCnt := operator.GetActualProbeCnt(operator.SCtx().GetSessionVars().StmtCtx.RuntimeStatsColl) if actualProbeCnt == 0 { return 0 } diff --git a/pkg/planner/core/plan_cost_ver2.go b/pkg/planner/core/plan_cost_ver2.go index fd44e4aed344d..58e4e717a80fa 100644 --- a/pkg/planner/core/plan_cost_ver2.go +++ b/pkg/planner/core/plan_cost_ver2.go @@ -17,7 +17,6 @@ package core import ( "fmt" "math" - "strconv" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" @@ -26,56 +25,57 @@ import ( "github.com/pingcap/tidb/pkg/planner/cardinality" "github.com/pingcap/tidb/pkg/planner/property" "github.com/pingcap/tidb/pkg/planner/util" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/util/paging" "github.com/pingcap/tipb/go-tipb" ) // GetPlanCost returns the cost of this plan. 
-func GetPlanCost(p PhysicalPlan, taskType property.TaskType, option *PlanCostOption) (float64, error) { +func GetPlanCost(p PhysicalPlan, taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { return getPlanCost(p, taskType, option) } // GenPlanCostTrace define a hook function to customize the cost calculation. -var GenPlanCostTrace func(p PhysicalPlan, costV *costVer2, taskType property.TaskType, option *PlanCostOption) +var GenPlanCostTrace func(p PhysicalPlan, costV *coreusage.CostVer2, taskType property.TaskType, option *coreusage.PlanCostOption) -func getPlanCost(p PhysicalPlan, taskType property.TaskType, option *PlanCostOption) (float64, error) { +func getPlanCost(p PhysicalPlan, taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { if p.SCtx().GetSessionVars().CostModelVersion == modelVer2 { - planCost, err := p.getPlanCostVer2(taskType, option) - if traceCost(option) && GenPlanCostTrace != nil { + planCost, err := p.GetPlanCostVer2(taskType, option) + if coreusage.TraceCost(option) && GenPlanCostTrace != nil { GenPlanCostTrace(p, &planCost, taskType, option) } - return planCost.cost, err + return planCost.GetCost(), err } - return p.getPlanCostVer1(taskType, option) + return p.GetPlanCostVer1(taskType, option) } -// getPlanCostVer2 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *basePhysicalPlan) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +// GetPlanCostVer2 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *basePhysicalPlan) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } - childCosts := make([]costVer2, 0, len(p.children)) + childCosts := make([]coreusage.CostVer2, 0, len(p.children)) for _, child := range p.children { - childCost, err := child.getPlanCostVer2(taskType, option) + childCost, err := child.GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } childCosts = append(childCosts, childCost) } if len(childCosts) == 0 { - p.planCostVer2 = newZeroCostVer2(traceCost(option)) + p.planCostVer2 = coreusage.NewZeroCostVer2(coreusage.TraceCost(option)) } else { - p.planCostVer2 = sumCostVer2(childCosts...) + p.planCostVer2 = coreusage.SumCostVer2(childCosts...) } p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = child-cost + filter-cost -func (p *PhysicalSelection) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalSelection) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -84,21 +84,21 @@ func (p *PhysicalSelection) getPlanCostVer2(taskType property.TaskType, option * filterCost := filterCostVer2(option, inputRows, p.Conditions, cpuFactor) - childCost, err := p.children[0].getPlanCostVer2(taskType, option) + childCost, err := p.children[0].GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - 
p.planCostVer2 = sumCostVer2(filterCost, childCost) + p.planCostVer2 = coreusage.SumCostVer2(filterCost, childCost) p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = child-cost + proj-cost / concurrency // proj-cost = input-rows * len(expressions) * cpu-factor -func (p *PhysicalProjection) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalProjection) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -111,21 +111,21 @@ func (p *PhysicalProjection) getPlanCostVer2(taskType property.TaskType, option projCost := filterCostVer2(option, inputRows, p.Exprs, cpuFactor) - childCost, err := p.children[0].getPlanCostVer2(taskType, option) + childCost, err := p.children[0].GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - p.planCostVer2 = sumCostVer2(childCost, divCostVer2(projCost, concurrency)) + p.planCostVer2 = coreusage.SumCostVer2(childCost, coreusage.DivCostVer2(projCost, concurrency)) p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = rows * log2(row-size) * scan-factor // log2(row-size) is from experiments. 
-func (p *PhysicalIndexScan) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalIndexScan) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -138,11 +138,11 @@ func (p *PhysicalIndexScan) getPlanCostVer2(taskType property.TaskType, option * return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = rows * log2(row-size) * scan-factor // log2(row-size) is from experiments. -func (p *PhysicalTableScan) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalTableScan) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -160,18 +160,18 @@ func (p *PhysicalTableScan) getPlanCostVer2(taskType property.TaskType, option * // give TiFlash a start-up cost to let the optimizer prefers to use TiKV to process small table scans. 
if p.StoreType == kv.TiFlash { - p.planCostVer2 = sumCostVer2(p.planCostVer2, scanCostVer2(option, 10000, rowSize, scanFactor)) + p.planCostVer2 = coreusage.SumCostVer2(p.planCostVer2, scanCostVer2(option, 10000, rowSize, scanFactor)) } p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = (child-cost + net-cost) / concurrency // net-cost = rows * row-size * net-factor -func (p *PhysicalIndexReader) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalIndexReader) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -182,21 +182,21 @@ func (p *PhysicalIndexReader) getPlanCostVer2(taskType property.TaskType, option netCost := netCostVer2(option, rows, rowSize, netFactor) - childCost, err := p.indexPlan.getPlanCostVer2(property.CopSingleReadTaskType, option) + childCost, err := p.indexPlan.GetPlanCostVer2(property.CopSingleReadTaskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - p.planCostVer2 = divCostVer2(sumCostVer2(childCost, netCost), concurrency) + p.planCostVer2 = coreusage.DivCostVer2(coreusage.SumCostVer2(childCost, netCost), concurrency) p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = (child-cost + net-cost) / concurrency // net-cost = rows * row-size * net-factor -func (p *PhysicalTableReader) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && 
!hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalTableReader) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -211,23 +211,23 @@ func (p *PhysicalTableReader) getPlanCostVer2(taskType property.TaskType, option netCost := netCostVer2(option, rows, rowSize, netFactor) - childCost, err := p.tablePlan.getPlanCostVer2(childType, option) + childCost, err := p.tablePlan.GetPlanCostVer2(childType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - p.planCostVer2 = divCostVer2(sumCostVer2(childCost, netCost), concurrency) + p.planCostVer2 = coreusage.DivCostVer2(coreusage.SumCostVer2(childCost, netCost), concurrency) p.planCostInit = true // consider tidb_enforce_mpp if p.StoreType == kv.TiFlash && p.SCtx().GetSessionVars().IsMPPEnforced() && - !hasCostFlag(option.CostFlag, CostFlagRecalculate) { // show the real cost in explain-statements - p.planCostVer2 = divCostVer2(p.planCostVer2, 1000000000) + !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { // show the real cost in explain-statements + p.planCostVer2 = coreusage.DivCostVer2(p.planCostVer2, 1000000000) } return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = index-side-cost + (table-side-cost + double-read-cost) / double-read-concurrency // index-side-cost = (index-child-cost + index-net-cost) / dist-concurrency # same with IndexReader // table-side-cost = (table-child-cost + table-net-cost) / dist-concurrency # same with TableReader @@ -235,8 +235,8 @@ func (p *PhysicalTableReader) getPlanCostVer2(taskType property.TaskType, option // double-read-request-cost = double-read-tasks * request-factor // double-read-cpu-cost = index-rows * 
cpu-factor // double-read-tasks = index-rows / batch-size * task-per-batch # task-per-batch is a magic number now -func (p *PhysicalIndexLookUpReader) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalIndexLookUpReader) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -252,83 +252,83 @@ func (p *PhysicalIndexLookUpReader) getPlanCostVer2(taskType property.TaskType, // index-side indexNetCost := netCostVer2(option, indexRows, indexRowSize, netFactor) - indexChildCost, err := p.indexPlan.getPlanCostVer2(property.CopMultiReadTaskType, option) + indexChildCost, err := p.indexPlan.GetPlanCostVer2(property.CopMultiReadTaskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - indexSideCost := divCostVer2(sumCostVer2(indexNetCost, indexChildCost), distConcurrency) + indexSideCost := coreusage.DivCostVer2(coreusage.SumCostVer2(indexNetCost, indexChildCost), distConcurrency) // table-side tableNetCost := netCostVer2(option, tableRows, tableRowSize, netFactor) - tableChildCost, err := p.tablePlan.getPlanCostVer2(property.CopMultiReadTaskType, option) + tableChildCost, err := p.tablePlan.GetPlanCostVer2(property.CopMultiReadTaskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - tableSideCost := divCostVer2(sumCostVer2(tableNetCost, tableChildCost), distConcurrency) + tableSideCost := coreusage.DivCostVer2(coreusage.SumCostVer2(tableNetCost, tableChildCost), distConcurrency) doubleReadRows := indexRows - doubleReadCPUCost := newCostVer2(option, cpuFactor, + doubleReadCPUCost := coreusage.NewCostVer2(option, cpuFactor, indexRows*cpuFactor.Value, func() string { return 
fmt.Sprintf("double-read-cpu(%v*%v)", doubleReadRows, cpuFactor) }) batchSize := float64(p.SCtx().GetSessionVars().IndexLookupSize) taskPerBatch := 32.0 // TODO: remove this magic number doubleReadTasks := doubleReadRows / batchSize * taskPerBatch doubleReadRequestCost := doubleReadCostVer2(option, doubleReadTasks, requestFactor) - doubleReadCost := sumCostVer2(doubleReadCPUCost, doubleReadRequestCost) + doubleReadCost := coreusage.SumCostVer2(doubleReadCPUCost, doubleReadRequestCost) - p.planCostVer2 = sumCostVer2(indexSideCost, divCostVer2(sumCostVer2(tableSideCost, doubleReadCost), doubleReadConcurrency)) + p.planCostVer2 = coreusage.SumCostVer2(indexSideCost, coreusage.DivCostVer2(coreusage.SumCostVer2(tableSideCost, doubleReadCost), doubleReadConcurrency)) if p.SCtx().GetSessionVars().EnablePaging && p.expectedCnt > 0 && p.expectedCnt <= paging.Threshold { // if the expectCnt is below the paging threshold, using paging API p.Paging = true // TODO: move this operation from cost model to physical optimization - p.planCostVer2 = mulCostVer2(p.planCostVer2, 0.6) + p.planCostVer2 = coreusage.MulCostVer2(p.planCostVer2, 0.6) } p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = table-side-cost + sum(index-side-cost) // index-side-cost = (index-child-cost + index-net-cost) / dist-concurrency # same with IndexReader // table-side-cost = (table-child-cost + table-net-cost) / dist-concurrency # same with TableReader -func (p *PhysicalIndexMergeReader) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalIndexMergeReader) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, 
coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } netFactor := getTaskNetFactorVer2(p, taskType) distConcurrency := float64(p.SCtx().GetSessionVars().DistSQLScanConcurrency()) - var tableSideCost costVer2 + var tableSideCost coreusage.CostVer2 if tablePath := p.tablePlan; tablePath != nil { rows := getCardinality(tablePath, option.CostFlag) rowSize := getAvgRowSize(tablePath.StatsInfo(), tablePath.Schema().Columns) tableNetCost := netCostVer2(option, rows, rowSize, netFactor) - tableChildCost, err := tablePath.getPlanCostVer2(taskType, option) + tableChildCost, err := tablePath.GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - tableSideCost = divCostVer2(sumCostVer2(tableNetCost, tableChildCost), distConcurrency) + tableSideCost = coreusage.DivCostVer2(coreusage.SumCostVer2(tableNetCost, tableChildCost), distConcurrency) } - indexSideCost := make([]costVer2, 0, len(p.partialPlans)) + indexSideCost := make([]coreusage.CostVer2, 0, len(p.partialPlans)) for _, indexPath := range p.partialPlans { rows := getCardinality(indexPath, option.CostFlag) rowSize := getAvgRowSize(indexPath.StatsInfo(), indexPath.Schema().Columns) indexNetCost := netCostVer2(option, rows, rowSize, netFactor) - indexChildCost, err := indexPath.getPlanCostVer2(taskType, option) + indexChildCost, err := indexPath.GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } indexSideCost = append(indexSideCost, - divCostVer2(sumCostVer2(indexNetCost, indexChildCost), distConcurrency)) + coreusage.DivCostVer2(coreusage.SumCostVer2(indexNetCost, indexChildCost), distConcurrency)) } - sumIndexSideCost := sumCostVer2(indexSideCost...) + sumIndexSideCost := coreusage.SumCostVer2(indexSideCost...) 
- p.planCostVer2 = sumCostVer2(tableSideCost, sumIndexSideCost) + p.planCostVer2 = coreusage.SumCostVer2(tableSideCost, sumIndexSideCost) // give a bias to pushDown limit, since it will get the same cost with NON_PUSH_DOWN_LIMIT case via expect count. // push down limit case may reduce cop request consumption if any in some cases. // @@ -341,13 +341,13 @@ func (p *PhysicalIndexMergeReader) getPlanCostVer2(taskType property.TaskType, o // will have the same cost, actually if limit are more close to the fetch side, the fewer rows that table plan need to read. // todo: refine the cost computation out from cost model. if p.PushedLimit != nil { - p.planCostVer2 = mulCostVer2(p.planCostVer2, 0.99) + p.planCostVer2 = coreusage.MulCostVer2(p.planCostVer2, 0.99) } p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = child-cost + sort-cpu-cost + sort-mem-cost + sort-disk-cost // sort-cpu-cost = rows * log2(rows) * len(sort-items) * cpu-factor // if no spill: @@ -356,8 +356,8 @@ func (p *PhysicalIndexMergeReader) getPlanCostVer2(taskType property.TaskType, o // else if spill: // 1. sort-mem-cost = mem-quota * mem-factor // 2. 
sort-disk-cost = rows * row-size * disk-factor -func (p *PhysicalSort) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalSort) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -375,37 +375,37 @@ func (p *PhysicalSort) getPlanCostVer2(taskType property.TaskType, option *PlanC sortCPUCost := orderCostVer2(option, rows, rows, p.ByItems, cpuFactor) - var sortMemCost, sortDiskCost costVer2 + var sortMemCost, sortDiskCost coreusage.CostVer2 if !spill { - sortMemCost = newCostVer2(option, memFactor, + sortMemCost = coreusage.NewCostVer2(option, memFactor, rows*rowSize*memFactor.Value, func() string { return fmt.Sprintf("sortMem(%v*%v*%v)", rows, rowSize, memFactor) }) - sortDiskCost = zeroCostVer2 + sortDiskCost = coreusage.ZeroCostVer2 } else { - sortMemCost = newCostVer2(option, memFactor, + sortMemCost = coreusage.NewCostVer2(option, memFactor, float64(memQuota)*memFactor.Value, func() string { return fmt.Sprintf("sortMem(%v*%v)", memQuota, memFactor) }) - sortDiskCost = newCostVer2(option, diskFactor, + sortDiskCost = coreusage.NewCostVer2(option, diskFactor, rows*rowSize*diskFactor.Value, func() string { return fmt.Sprintf("sortDisk(%v*%v*%v)", rows, rowSize, diskFactor) }) } - childCost, err := p.children[0].getPlanCostVer2(taskType, option) + childCost, err := p.children[0].GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - p.planCostVer2 = sumCostVer2(childCost, sortCPUCost, sortMemCost, sortDiskCost) + p.planCostVer2 = coreusage.SumCostVer2(childCost, sortCPUCost, sortMemCost, sortDiskCost) p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this 
sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = child-cost + topn-cpu-cost + topn-mem-cost // topn-cpu-cost = rows * log2(N) * len(sort-items) * cpu-factor // topn-mem-cost = N * row-size * mem-factor -func (p *PhysicalTopN) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalTopN) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -421,24 +421,24 @@ func (p *PhysicalTopN) getPlanCostVer2(taskType property.TaskType, option *PlanC memFactor := getTaskMemFactorVer2(p, taskType) topNCPUCost := orderCostVer2(option, rows, n, p.ByItems, cpuFactor) - topNMemCost := newCostVer2(option, memFactor, + topNMemCost := coreusage.NewCostVer2(option, memFactor, n*rowSize*memFactor.Value, func() string { return fmt.Sprintf("topMem(%v*%v*%v)", n, rowSize, memFactor) }) - childCost, err := p.children[0].getPlanCostVer2(taskType, option) + childCost, err := p.children[0].GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - p.planCostVer2 = sumCostVer2(childCost, topNCPUCost, topNMemCost) + p.planCostVer2 = coreusage.SumCostVer2(childCost, topNCPUCost, topNMemCost) p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = child-cost + agg-cost + group-cost -func (p *PhysicalStreamAgg) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalStreamAgg) GetPlanCostVer2(taskType property.TaskType, 
option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -448,20 +448,20 @@ func (p *PhysicalStreamAgg) getPlanCostVer2(taskType property.TaskType, option * aggCost := aggCostVer2(option, rows, p.AggFuncs, cpuFactor) groupCost := groupCostVer2(option, rows, p.GroupByItems, cpuFactor) - childCost, err := p.children[0].getPlanCostVer2(taskType, option) + childCost, err := p.children[0].GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - p.planCostVer2 = sumCostVer2(childCost, aggCost, groupCost) + p.planCostVer2 = coreusage.SumCostVer2(childCost, aggCost, groupCost) p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = child-cost + (agg-cost + group-cost + hash-build-cost + hash-probe-cost) / concurrency -func (p *PhysicalHashAgg) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalHashAgg) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -476,24 +476,24 @@ func (p *PhysicalHashAgg) getPlanCostVer2(taskType property.TaskType, option *Pl groupCost := groupCostVer2(option, inputRows, p.GroupByItems, cpuFactor) hashBuildCost := hashBuildCostVer2(option, outputRows, outputRowSize, float64(len(p.GroupByItems)), cpuFactor, memFactor) hashProbeCost := hashProbeCostVer2(option, inputRows, float64(len(p.GroupByItems)), cpuFactor) - startCost := newCostVer2(option, cpuFactor, + startCost := coreusage.NewCostVer2(option, cpuFactor, 
10*3*cpuFactor.Value, // 10rows * 3func * cpuFactor func() string { return fmt.Sprintf("cpu(10*3*%v)", cpuFactor) }) - childCost, err := p.children[0].getPlanCostVer2(taskType, option) + childCost, err := p.children[0].GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - p.planCostVer2 = sumCostVer2(startCost, childCost, divCostVer2(sumCostVer2(aggCost, groupCost, hashBuildCost, hashProbeCost), concurrency)) + p.planCostVer2 = coreusage.SumCostVer2(startCost, childCost, coreusage.DivCostVer2(coreusage.SumCostVer2(aggCost, groupCost, hashBuildCost, hashProbeCost), concurrency)) p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = left-child-cost + right-child-cost + filter-cost + group-cost -func (p *PhysicalMergeJoin) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalMergeJoin) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -501,31 +501,31 @@ func (p *PhysicalMergeJoin) getPlanCostVer2(taskType property.TaskType, option * rightRows := getCardinality(p.children[1], option.CostFlag) cpuFactor := getTaskCPUFactorVer2(p, taskType) - filterCost := sumCostVer2(filterCostVer2(option, leftRows, p.LeftConditions, cpuFactor), + filterCost := coreusage.SumCostVer2(filterCostVer2(option, leftRows, p.LeftConditions, cpuFactor), filterCostVer2(option, rightRows, p.RightConditions, cpuFactor)) - groupCost := sumCostVer2(groupCostVer2(option, leftRows, cols2Exprs(p.LeftJoinKeys), cpuFactor), + groupCost := coreusage.SumCostVer2(groupCostVer2(option, leftRows, 
cols2Exprs(p.LeftJoinKeys), cpuFactor), groupCostVer2(option, rightRows, cols2Exprs(p.LeftJoinKeys), cpuFactor)) - leftChildCost, err := p.children[0].getPlanCostVer2(taskType, option) + leftChildCost, err := p.children[0].GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - rightChildCost, err := p.children[1].getPlanCostVer2(taskType, option) + rightChildCost, err := p.children[1].GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - p.planCostVer2 = sumCostVer2(leftChildCost, rightChildCost, filterCost, groupCost) + p.planCostVer2 = coreusage.SumCostVer2(leftChildCost, rightChildCost, filterCost, groupCost) p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = build-child-cost + probe-child-cost + // build-hash-cost + build-filter-cost + // (probe-filter-cost + probe-hash-cost) / concurrency -func (p *PhysicalHashJoin) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalHashJoin) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -550,31 +550,31 @@ func (p *PhysicalHashJoin) getPlanCostVer2(taskType property.TaskType, option *P probeFilterCost := filterCostVer2(option, probeRows, probeFilters, cpuFactor) probeHashCost := hashProbeCostVer2(option, probeRows, float64(len(probeKeys)), cpuFactor) - buildChildCost, err := build.getPlanCostVer2(taskType, option) + buildChildCost, err := build.GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return 
coreusage.ZeroCostVer2, err } - probeChildCost, err := probe.getPlanCostVer2(taskType, option) + probeChildCost, err := probe.GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } if taskType == property.MppTaskType { // BCast or Shuffle Join, use mppConcurrency - p.planCostVer2 = sumCostVer2(buildChildCost, probeChildCost, - divCostVer2(sumCostVer2(buildHashCost, buildFilterCost, probeHashCost, probeFilterCost), mppConcurrency)) + p.planCostVer2 = coreusage.SumCostVer2(buildChildCost, probeChildCost, + coreusage.DivCostVer2(coreusage.SumCostVer2(buildHashCost, buildFilterCost, probeHashCost, probeFilterCost), mppConcurrency)) } else { // TiDB HashJoin - startCost := newCostVer2(option, cpuFactor, + startCost := coreusage.NewCostVer2(option, cpuFactor, 10*3*cpuFactor.Value, // 10rows * 3func * cpuFactor func() string { return fmt.Sprintf("cpu(10*3*%v)", cpuFactor) }) - p.planCostVer2 = sumCostVer2(startCost, buildChildCost, probeChildCost, buildHashCost, buildFilterCost, - divCostVer2(sumCostVer2(probeFilterCost, probeHashCost), tidbConcurrency)) + p.planCostVer2 = coreusage.SumCostVer2(startCost, buildChildCost, probeChildCost, buildHashCost, buildFilterCost, + coreusage.DivCostVer2(coreusage.SumCostVer2(probeFilterCost, probeHashCost), tidbConcurrency)) } p.planCostInit = true return p.planCostVer2, nil } -func (p *PhysicalIndexJoin) getIndexJoinCostVer2(taskType property.TaskType, option *PlanCostOption, indexJoinType int) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalIndexJoin) getIndexJoinCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption, indexJoinType int) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -591,29 +591,29 @@ func (p *PhysicalIndexJoin) getIndexJoinCostVer2(taskType property.TaskType, opt 
requestFactor := getTaskRequestFactorVer2(p, taskType) buildFilterCost := filterCostVer2(option, buildRows, buildFilters, cpuFactor) - buildChildCost, err := build.getPlanCostVer2(taskType, option) + buildChildCost, err := build.GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - buildTaskCost := newCostVer2(option, cpuFactor, + buildTaskCost := coreusage.NewCostVer2(option, cpuFactor, buildRows*10*cpuFactor.Value, func() string { return fmt.Sprintf("cpu(%v*10*%v)", buildRows, cpuFactor) }) - startCost := newCostVer2(option, cpuFactor, + startCost := coreusage.NewCostVer2(option, cpuFactor, 10*3*cpuFactor.Value, func() string { return fmt.Sprintf("cpu(10*3*%v)", cpuFactor) }) probeFilterCost := filterCostVer2(option, probeRowsTot, probeFilters, cpuFactor) - probeChildCost, err := probe.getPlanCostVer2(taskType, option) + probeChildCost, err := probe.GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - var hashTableCost costVer2 + var hashTableCost coreusage.CostVer2 switch indexJoinType { case 1: // IndexHashJoin hashTableCost = hashBuildCostVer2(option, buildRows, buildRowSize, float64(len(p.RightJoinKeys)), cpuFactor, memFactor) case 2: // IndexMergeJoin - hashTableCost = newZeroCostVer2(traceCost(option)) + hashTableCost = coreusage.NewZeroCostVer2(coreusage.TraceCost(option)) default: // IndexJoin hashTableCost = hashBuildCostVer2(option, probeRowsTot, probeRowSize, float64(len(p.LeftJoinKeys)), cpuFactor, memFactor) } @@ -623,44 +623,44 @@ func (p *PhysicalIndexJoin) getIndexJoinCostVer2(taskType property.TaskType, opt // Use an empirical value batchRatio to handle this now. // TODO: remove this empirical value. 
batchRatio := 6.0 - probeCost := divCostVer2(mulCostVer2(probeChildCost, buildRows), batchRatio) + probeCost := coreusage.DivCostVer2(coreusage.MulCostVer2(probeChildCost, buildRows), batchRatio) // Double Read Cost - doubleReadCost := newZeroCostVer2(traceCost(option)) + doubleReadCost := coreusage.NewZeroCostVer2(coreusage.TraceCost(option)) if p.SCtx().GetSessionVars().IndexJoinDoubleReadPenaltyCostRate > 0 { batchSize := float64(p.SCtx().GetSessionVars().IndexJoinBatchSize) taskPerBatch := 1024.0 // TODO: remove this magic number doubleReadTasks := buildRows / batchSize * taskPerBatch doubleReadCost = doubleReadCostVer2(option, doubleReadTasks, requestFactor) - doubleReadCost = mulCostVer2(doubleReadCost, p.SCtx().GetSessionVars().IndexJoinDoubleReadPenaltyCostRate) + doubleReadCost = coreusage.MulCostVer2(doubleReadCost, p.SCtx().GetSessionVars().IndexJoinDoubleReadPenaltyCostRate) } - p.planCostVer2 = sumCostVer2(startCost, buildChildCost, buildFilterCost, buildTaskCost, divCostVer2(sumCostVer2(doubleReadCost, probeCost, probeFilterCost, hashTableCost), probeConcurrency)) + p.planCostVer2 = coreusage.SumCostVer2(startCost, buildChildCost, buildFilterCost, buildTaskCost, coreusage.DivCostVer2(coreusage.SumCostVer2(doubleReadCost, probeCost, probeFilterCost, hashTableCost), probeConcurrency)) p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = build-child-cost + build-filter-cost + // (probe-cost + probe-filter-cost) / concurrency // probe-cost = probe-child-cost * build-rows / batchRatio -func (p *PhysicalIndexJoin) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { +func (p *PhysicalIndexJoin) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { return p.getIndexJoinCostVer2(taskType, option, 0) } -func (p 
*PhysicalIndexHashJoin) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { +func (p *PhysicalIndexHashJoin) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { return p.getIndexJoinCostVer2(taskType, option, 1) } -func (p *PhysicalIndexMergeJoin) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { +func (p *PhysicalIndexMergeJoin) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { return p.getIndexJoinCostVer2(taskType, option, 2) } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = build-child-cost + build-filter-cost + probe-cost + probe-filter-cost // probe-cost = probe-child-cost * build-rows -func (p *PhysicalApply) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalApply) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -670,48 +670,48 @@ func (p *PhysicalApply) getPlanCostVer2(taskType property.TaskType, option *Plan cpuFactor := getTaskCPUFactorVer2(p, taskType) buildFilterCost := filterCostVer2(option, buildRows, p.LeftConditions, cpuFactor) - buildChildCost, err := p.children[0].getPlanCostVer2(taskType, option) + buildChildCost, err := p.children[0].GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } probeFilterCost := filterCostVer2(option, probeRowsTot, p.RightConditions, cpuFactor) - probeChildCost, err := p.children[1].getPlanCostVer2(taskType, option) + probeChildCost, err := 
p.children[1].GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - probeCost := mulCostVer2(probeChildCost, buildRows) + probeCost := coreusage.MulCostVer2(probeChildCost, buildRows) - p.planCostVer2 = sumCostVer2(buildChildCost, buildFilterCost, probeCost, probeFilterCost) + p.planCostVer2 = coreusage.SumCostVer2(buildChildCost, buildFilterCost, probeCost, probeFilterCost) p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 calculates the cost of the plan if it has not been calculated yet and returns the cost. +// GetPlanCostVer2 calculates the cost of the plan if it has not been calculated yet and returns the cost. // plan-cost = sum(child-cost) / concurrency -func (p *PhysicalUnionAll) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalUnionAll) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } concurrency := float64(p.SCtx().GetSessionVars().UnionConcurrency()) - childCosts := make([]costVer2, 0, len(p.children)) + childCosts := make([]coreusage.CostVer2, 0, len(p.children)) for _, child := range p.children { - childCost, err := child.getPlanCostVer2(taskType, option) + childCost, err := child.GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } childCosts = append(childCosts, childCost) } - p.planCostVer2 = divCostVer2(sumCostVer2(childCosts...), concurrency) + p.planCostVer2 = coreusage.DivCostVer2(coreusage.SumCostVer2(childCosts...), concurrency) p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this 
sub-plan, which is: // plan-cost = child-cost + net-cost -func (p *PhysicalExchangeReceiver) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalExchangeReceiver) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -726,28 +726,28 @@ func (p *PhysicalExchangeReceiver) getPlanCostVer2(taskType property.TaskType, o netCost := netCostVer2(option, rows, rowSize, netFactor) if isBCast { - netCost = mulCostVer2(netCost, numNode) + netCost = coreusage.MulCostVer2(netCost, numNode) } - childCost, err := p.children[0].getPlanCostVer2(taskType, option) + childCost, err := p.children[0].GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - p.planCostVer2 = sumCostVer2(childCost, netCost) + p.planCostVer2 = coreusage.SumCostVer2(childCost, netCost) p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: -func (p *PointGetPlan) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: +func (p *PointGetPlan) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } if p.accessCols == nil { // from fast plan code path - p.planCostVer2 = zeroCostVer2 + p.planCostVer2 = coreusage.ZeroCostVer2 p.planCostInit = true - return zeroCostVer2, nil + return coreusage.ZeroCostVer2, nil } rowSize := getAvgRowSize(p.StatsInfo(), 
p.schema.Columns) netFactor := getTaskNetFactorVer2(p, taskType) @@ -757,16 +757,16 @@ func (p *PointGetPlan) getPlanCostVer2(taskType property.TaskType, option *PlanC return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: -func (p *BatchPointGetPlan) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: +func (p *BatchPointGetPlan) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } if p.accessCols == nil { // from fast plan code path - p.planCostVer2 = zeroCostVer2 + p.planCostVer2 = coreusage.ZeroCostVer2 p.planCostInit = true - return zeroCostVer2, nil + return coreusage.ZeroCostVer2, nil } rows := getCardinality(p, option.CostFlag) rowSize := getAvgRowSize(p.StatsInfo(), p.schema.Columns) @@ -777,8 +777,8 @@ func (p *BatchPointGetPlan) getPlanCostVer2(taskType property.TaskType, option * return p.planCostVer2, nil } -func (p *PhysicalCTE) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalCTE) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -792,39 +792,39 @@ func (p *PhysicalCTE) getPlanCostVer2(taskType property.TaskType, option *PlanCo return p.planCostVer2, nil } -func scanCostVer2(option *PlanCostOption, rows, rowSize float64, scanFactor costVer2Factor) costVer2 { +func scanCostVer2(option *coreusage.PlanCostOption, rows, rowSize float64, scanFactor 
coreusage.CostVer2Factor) coreusage.CostVer2 { if rowSize < 1 { rowSize = 1 } - return newCostVer2(option, scanFactor, + return coreusage.NewCostVer2(option, scanFactor, // rows * log(row-size) * scanFactor, log2 from experiments rows*math.Log2(rowSize)*scanFactor.Value, func() string { return fmt.Sprintf("scan(%v*logrowsize(%v)*%v)", rows, rowSize, scanFactor) }) } -func netCostVer2(option *PlanCostOption, rows, rowSize float64, netFactor costVer2Factor) costVer2 { - return newCostVer2(option, netFactor, +func netCostVer2(option *coreusage.PlanCostOption, rows, rowSize float64, netFactor coreusage.CostVer2Factor) coreusage.CostVer2 { + return coreusage.NewCostVer2(option, netFactor, rows*rowSize*netFactor.Value, func() string { return fmt.Sprintf("net(%v*rowsize(%v)*%v)", rows, rowSize, netFactor) }) } -func filterCostVer2(option *PlanCostOption, rows float64, filters []expression.Expression, cpuFactor costVer2Factor) costVer2 { +func filterCostVer2(option *coreusage.PlanCostOption, rows float64, filters []expression.Expression, cpuFactor coreusage.CostVer2Factor) coreusage.CostVer2 { numFuncs := numFunctions(filters) - return newCostVer2(option, cpuFactor, + return coreusage.NewCostVer2(option, cpuFactor, rows*numFuncs*cpuFactor.Value, func() string { return fmt.Sprintf("cpu(%v*filters(%v)*%v)", rows, numFuncs, cpuFactor) }) } -func aggCostVer2(option *PlanCostOption, rows float64, aggFuncs []*aggregation.AggFuncDesc, cpuFactor costVer2Factor) costVer2 { - return newCostVer2(option, cpuFactor, +func aggCostVer2(option *coreusage.PlanCostOption, rows float64, aggFuncs []*aggregation.AggFuncDesc, cpuFactor coreusage.CostVer2Factor) coreusage.CostVer2 { + return coreusage.NewCostVer2(option, cpuFactor, // TODO: consider types of agg-funcs rows*float64(len(aggFuncs))*cpuFactor.Value, func() string { return fmt.Sprintf("agg(%v*aggs(%v)*%v)", rows, len(aggFuncs), cpuFactor) }) } -func groupCostVer2(option *PlanCostOption, rows float64, groupItems 
[]expression.Expression, cpuFactor costVer2Factor) costVer2 { +func groupCostVer2(option *coreusage.PlanCostOption, rows float64, groupItems []expression.Expression, cpuFactor coreusage.CostVer2Factor) coreusage.CostVer2 { numFuncs := numFunctions(groupItems) - return newCostVer2(option, cpuFactor, + return coreusage.NewCostVer2(option, cpuFactor, rows*numFuncs*cpuFactor.Value, func() string { return fmt.Sprintf("group(%v*cols(%v)*%v)", rows, numFuncs, cpuFactor) }) } @@ -841,106 +841,97 @@ func numFunctions(exprs []expression.Expression) float64 { return num } -func orderCostVer2(option *PlanCostOption, rows, n float64, byItems []*util.ByItems, cpuFactor costVer2Factor) costVer2 { +func orderCostVer2(option *coreusage.PlanCostOption, rows, n float64, byItems []*util.ByItems, cpuFactor coreusage.CostVer2Factor) coreusage.CostVer2 { numFuncs := 0 for _, byItem := range byItems { if _, ok := byItem.Expr.(*expression.ScalarFunction); ok { numFuncs++ } } - exprCost := newCostVer2(option, cpuFactor, + exprCost := coreusage.NewCostVer2(option, cpuFactor, rows*float64(numFuncs)*cpuFactor.Value, func() string { return fmt.Sprintf("exprCPU(%v*%v*%v)", rows, numFuncs, cpuFactor) }) - orderCost := newCostVer2(option, cpuFactor, + orderCost := coreusage.NewCostVer2(option, cpuFactor, rows*math.Log2(n)*cpuFactor.Value, func() string { return fmt.Sprintf("orderCPU(%v*log(%v)*%v)", rows, n, cpuFactor) }) - return sumCostVer2(exprCost, orderCost) + return coreusage.SumCostVer2(exprCost, orderCost) } -func hashBuildCostVer2(option *PlanCostOption, buildRows, buildRowSize, nKeys float64, cpuFactor, memFactor costVer2Factor) costVer2 { +func hashBuildCostVer2(option *coreusage.PlanCostOption, buildRows, buildRowSize, nKeys float64, cpuFactor, memFactor coreusage.CostVer2Factor) coreusage.CostVer2 { // TODO: 1) consider types of keys, 2) dedicated factor for build-probe hash table - hashKeyCost := newCostVer2(option, cpuFactor, + hashKeyCost := coreusage.NewCostVer2(option, cpuFactor, 
buildRows*nKeys*cpuFactor.Value, func() string { return fmt.Sprintf("hashkey(%v*%v*%v)", buildRows, nKeys, cpuFactor) }) - hashMemCost := newCostVer2(option, memFactor, + hashMemCost := coreusage.NewCostVer2(option, memFactor, buildRows*buildRowSize*memFactor.Value, func() string { return fmt.Sprintf("hashmem(%v*%v*%v)", buildRows, buildRowSize, memFactor) }) - hashBuildCost := newCostVer2(option, cpuFactor, + hashBuildCost := coreusage.NewCostVer2(option, cpuFactor, buildRows*cpuFactor.Value, func() string { return fmt.Sprintf("hashbuild(%v*%v)", buildRows, cpuFactor) }) - return sumCostVer2(hashKeyCost, hashMemCost, hashBuildCost) + return coreusage.SumCostVer2(hashKeyCost, hashMemCost, hashBuildCost) } -func hashProbeCostVer2(option *PlanCostOption, probeRows, nKeys float64, cpuFactor costVer2Factor) costVer2 { +func hashProbeCostVer2(option *coreusage.PlanCostOption, probeRows, nKeys float64, cpuFactor coreusage.CostVer2Factor) coreusage.CostVer2 { // TODO: 1) consider types of keys, 2) dedicated factor for build-probe hash table - hashKeyCost := newCostVer2(option, cpuFactor, + hashKeyCost := coreusage.NewCostVer2(option, cpuFactor, probeRows*nKeys*cpuFactor.Value, func() string { return fmt.Sprintf("hashkey(%v*%v*%v)", probeRows, nKeys, cpuFactor) }) - hashProbeCost := newCostVer2(option, cpuFactor, + hashProbeCost := coreusage.NewCostVer2(option, cpuFactor, probeRows*cpuFactor.Value, func() string { return fmt.Sprintf("hashprobe(%v*%v)", probeRows, cpuFactor) }) - return sumCostVer2(hashKeyCost, hashProbeCost) + return coreusage.SumCostVer2(hashKeyCost, hashProbeCost) } // For simplicity and robust, only operators that need double-read like IndexLookup and IndexJoin consider this cost. 
-func doubleReadCostVer2(option *PlanCostOption, numTasks float64, requestFactor costVer2Factor) costVer2 { - return newCostVer2(option, requestFactor, +func doubleReadCostVer2(option *coreusage.PlanCostOption, numTasks float64, requestFactor coreusage.CostVer2Factor) coreusage.CostVer2 { + return coreusage.NewCostVer2(option, requestFactor, numTasks*requestFactor.Value, func() string { return fmt.Sprintf("doubleRead(tasks(%v)*%v)", numTasks, requestFactor) }) } -type costVer2Factor struct { - Name string - Value float64 -} - -func (f costVer2Factor) String() string { - return fmt.Sprintf("%s(%v)", f.Name, f.Value) -} - // In Cost Ver2, we hide cost factors from users and deprecate SQL variables like `tidb_opt_scan_factor`. type costVer2Factors struct { - TiDBTemp costVer2Factor // operations on TiDB temporary table - TiKVScan costVer2Factor // per byte - TiKVDescScan costVer2Factor // per byte - TiFlashScan costVer2Factor // per byte - TiDBCPU costVer2Factor // per column or expression - TiKVCPU costVer2Factor // per column or expression - TiFlashCPU costVer2Factor // per column or expression - TiDB2KVNet costVer2Factor // per byte - TiDB2FlashNet costVer2Factor // per byte - TiFlashMPPNet costVer2Factor // per byte - TiDBMem costVer2Factor // per byte - TiKVMem costVer2Factor // per byte - TiFlashMem costVer2Factor // per byte - TiDBDisk costVer2Factor // per byte - TiDBRequest costVer2Factor // per net request + TiDBTemp coreusage.CostVer2Factor // operations on TiDB temporary table + TiKVScan coreusage.CostVer2Factor // per byte + TiKVDescScan coreusage.CostVer2Factor // per byte + TiFlashScan coreusage.CostVer2Factor // per byte + TiDBCPU coreusage.CostVer2Factor // per column or expression + TiKVCPU coreusage.CostVer2Factor // per column or expression + TiFlashCPU coreusage.CostVer2Factor // per column or expression + TiDB2KVNet coreusage.CostVer2Factor // per byte + TiDB2FlashNet coreusage.CostVer2Factor // per byte + TiFlashMPPNet coreusage.CostVer2Factor 
// per byte + TiDBMem coreusage.CostVer2Factor // per byte + TiKVMem coreusage.CostVer2Factor // per byte + TiFlashMem coreusage.CostVer2Factor // per byte + TiDBDisk coreusage.CostVer2Factor // per byte + TiDBRequest coreusage.CostVer2Factor // per net request } -func (c costVer2Factors) tolist() (l []costVer2Factor) { +func (c costVer2Factors) tolist() (l []coreusage.CostVer2Factor) { return append(l, c.TiDBTemp, c.TiKVScan, c.TiKVDescScan, c.TiFlashScan, c.TiDBCPU, c.TiKVCPU, c.TiFlashCPU, c.TiDB2KVNet, c.TiDB2FlashNet, c.TiFlashMPPNet, c.TiDBMem, c.TiKVMem, c.TiFlashMem, c.TiDBDisk, c.TiDBRequest) } var defaultVer2Factors = costVer2Factors{ - TiDBTemp: costVer2Factor{"tidb_temp_table_factor", 0.00}, - TiKVScan: costVer2Factor{"tikv_scan_factor", 40.70}, - TiKVDescScan: costVer2Factor{"tikv_desc_scan_factor", 61.05}, - TiFlashScan: costVer2Factor{"tiflash_scan_factor", 11.60}, - TiDBCPU: costVer2Factor{"tidb_cpu_factor", 49.90}, - TiKVCPU: costVer2Factor{"tikv_cpu_factor", 49.90}, - TiFlashCPU: costVer2Factor{"tiflash_cpu_factor", 2.40}, - TiDB2KVNet: costVer2Factor{"tidb_kv_net_factor", 3.96}, - TiDB2FlashNet: costVer2Factor{"tidb_flash_net_factor", 2.20}, - TiFlashMPPNet: costVer2Factor{"tiflash_mpp_net_factor", 1.00}, - TiDBMem: costVer2Factor{"tidb_mem_factor", 0.20}, - TiKVMem: costVer2Factor{"tikv_mem_factor", 0.20}, - TiFlashMem: costVer2Factor{"tiflash_mem_factor", 0.05}, - TiDBDisk: costVer2Factor{"tidb_disk_factor", 200.00}, - TiDBRequest: costVer2Factor{"tidb_request_factor", 6000000.00}, + TiDBTemp: coreusage.CostVer2Factor{"tidb_temp_table_factor", 0.00}, + TiKVScan: coreusage.CostVer2Factor{"tikv_scan_factor", 40.70}, + TiKVDescScan: coreusage.CostVer2Factor{"tikv_desc_scan_factor", 61.05}, + TiFlashScan: coreusage.CostVer2Factor{"tiflash_scan_factor", 11.60}, + TiDBCPU: coreusage.CostVer2Factor{"tidb_cpu_factor", 49.90}, + TiKVCPU: coreusage.CostVer2Factor{"tikv_cpu_factor", 49.90}, + TiFlashCPU: coreusage.CostVer2Factor{"tiflash_cpu_factor", 
2.40}, + TiDB2KVNet: coreusage.CostVer2Factor{"tidb_kv_net_factor", 3.96}, + TiDB2FlashNet: coreusage.CostVer2Factor{"tidb_flash_net_factor", 2.20}, + TiFlashMPPNet: coreusage.CostVer2Factor{"tiflash_mpp_net_factor", 1.00}, + TiDBMem: coreusage.CostVer2Factor{"tidb_mem_factor", 0.20}, + TiKVMem: coreusage.CostVer2Factor{"tikv_mem_factor", 0.20}, + TiFlashMem: coreusage.CostVer2Factor{"tiflash_mem_factor", 0.05}, + TiDBDisk: coreusage.CostVer2Factor{"tidb_disk_factor", 200.00}, + TiDBRequest: coreusage.CostVer2Factor{"tidb_request_factor", 6000000.00}, } -func getTaskCPUFactorVer2(_ PhysicalPlan, taskType property.TaskType) costVer2Factor { +func getTaskCPUFactorVer2(_ PhysicalPlan, taskType property.TaskType) coreusage.CostVer2Factor { switch taskType { case property.RootTaskType: // TiDB return defaultVer2Factors.TiDBCPU @@ -951,7 +942,7 @@ func getTaskCPUFactorVer2(_ PhysicalPlan, taskType property.TaskType) costVer2Fa } } -func getTaskMemFactorVer2(_ PhysicalPlan, taskType property.TaskType) costVer2Factor { +func getTaskMemFactorVer2(_ PhysicalPlan, taskType property.TaskType) coreusage.CostVer2Factor { switch taskType { case property.RootTaskType: // TiDB return defaultVer2Factors.TiDBMem @@ -962,7 +953,7 @@ func getTaskMemFactorVer2(_ PhysicalPlan, taskType property.TaskType) costVer2Fa } } -func getTaskScanFactorVer2(p PhysicalPlan, storeType kv.StoreType, taskType property.TaskType) costVer2Factor { +func getTaskScanFactorVer2(p PhysicalPlan, storeType kv.StoreType, taskType property.TaskType) coreusage.CostVer2Factor { if isTemporaryTable(getTableInfo(p)) { return defaultVer2Factors.TiDBTemp } @@ -987,7 +978,7 @@ func getTaskScanFactorVer2(p PhysicalPlan, storeType kv.StoreType, taskType prop } } -func getTaskNetFactorVer2(p PhysicalPlan, _ property.TaskType) costVer2Factor { +func getTaskNetFactorVer2(p PhysicalPlan, _ property.TaskType) coreusage.CostVer2Factor { if isTemporaryTable(getTableInfo(p)) { return defaultVer2Factors.TiDBTemp } @@ -1002,7 
+993,7 @@ func getTaskNetFactorVer2(p PhysicalPlan, _ property.TaskType) costVer2Factor { return defaultVer2Factors.TiDB2KVNet } -func getTaskRequestFactorVer2(p PhysicalPlan, _ property.TaskType) costVer2Factor { +func getTaskRequestFactorVer2(p PhysicalPlan, _ property.TaskType) coreusage.CostVer2Factor { if isTemporaryTable(getTableInfo(p)) { return defaultVer2Factors.TiDBTemp } @@ -1045,85 +1036,3 @@ func cols2Exprs(cols []*expression.Column) []expression.Expression { } return exprs } - -type costTrace struct { - factorCosts map[string]float64 // map[factorName]cost, used to calibrate the cost model - formula string // It used to trace the cost calculation. -} - -type costVer2 struct { - cost float64 - trace *costTrace -} - -func traceCost(option *PlanCostOption) bool { - if option != nil && hasCostFlag(option.CostFlag, CostFlagTrace) { - return true - } - return false -} - -func newZeroCostVer2(trace bool) (ret costVer2) { - if trace { - ret.trace = &costTrace{make(map[string]float64), ""} - } - return -} - -func newCostVer2(option *PlanCostOption, factor costVer2Factor, cost float64, lazyFormula func() string) (ret costVer2) { - ret.cost = cost - if traceCost(option) { - ret.trace = &costTrace{make(map[string]float64), ""} - ret.trace.factorCosts[factor.Name] = cost - ret.trace.formula = lazyFormula() - } - return ret -} - -func sumCostVer2(costs ...costVer2) (ret costVer2) { - if len(costs) == 0 { - return - } - for _, c := range costs { - ret.cost += c.cost - if c.trace != nil { - if ret.trace == nil { // init - ret.trace = &costTrace{make(map[string]float64), ""} - } - for factor, factorCost := range c.trace.factorCosts { - ret.trace.factorCosts[factor] += factorCost - } - if ret.trace.formula != "" { - ret.trace.formula += " + " - } - ret.trace.formula += "(" + c.trace.formula + ")" - } - } - return ret -} - -func divCostVer2(cost costVer2, denominator float64) (ret costVer2) { - ret.cost = cost.cost / denominator - if cost.trace != nil { - ret.trace = 
&costTrace{make(map[string]float64), ""} - for f, c := range cost.trace.factorCosts { - ret.trace.factorCosts[f] = c / denominator - } - ret.trace.formula = "(" + cost.trace.formula + ")/" + strconv.FormatFloat(denominator, 'f', 2, 64) - } - return ret -} - -func mulCostVer2(cost costVer2, scale float64) (ret costVer2) { - ret.cost = cost.cost * scale - if cost.trace != nil { - ret.trace = &costTrace{make(map[string]float64), ""} - for f, c := range cost.trace.factorCosts { - ret.trace.factorCosts[f] = c * scale - } - ret.trace.formula = "(" + cost.trace.formula + ")*" + strconv.FormatFloat(scale, 'f', 2, 64) - } - return ret -} - -var zeroCostVer2 = newZeroCostVer2(false) diff --git a/pkg/planner/core/plan_cost_ver2_test.go b/pkg/planner/core/plan_cost_ver2_test.go index 657092f0e7bf3..de94de53fb242 100644 --- a/pkg/planner/core/plan_cost_ver2_test.go +++ b/pkg/planner/core/plan_cost_ver2_test.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/tidb/pkg/planner" "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/property" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/sessiontxn" "github.com/pingcap/tidb/pkg/testkit" "github.com/stretchr/testify/require" @@ -168,13 +169,13 @@ func BenchmarkGetPlanCost(b *testing.B) { b.Fatal(err) } phyPlan := plan.(core.PhysicalPlan) - _, err = core.GetPlanCost(phyPlan, property.RootTaskType, core.NewDefaultPlanCostOption().WithCostFlag(core.CostFlagRecalculate)) + _, err = core.GetPlanCost(phyPlan, property.RootTaskType, coreusage.NewDefaultPlanCostOption().WithCostFlag(coreusage.CostFlagRecalculate)) if err != nil { b.Fatal(err) } b.ResetTimer() for i := 0; i < b.N; i++ { - _, _ = core.GetPlanCost(phyPlan, property.RootTaskType, core.NewDefaultPlanCostOption().WithCostFlag(core.CostFlagRecalculate)) + _, _ = core.GetPlanCost(phyPlan, property.RootTaskType, coreusage.NewDefaultPlanCostOption().WithCostFlag(coreusage.CostFlagRecalculate)) } } diff --git 
a/pkg/planner/core/planbuilder.go b/pkg/planner/core/planbuilder.go index f23669d47313f..edcd6859530b0 100644 --- a/pkg/planner/core/planbuilder.go +++ b/pkg/planner/core/planbuilder.go @@ -1562,11 +1562,11 @@ func (b *PlanBuilder) buildPhysicalIndexLookUpReader(_ context.Context, dbName m extraHandleCol: extraCol, commonHandleCols: commonCols, } - rootT := cop.convertToRootTask(b.ctx) - if err := rootT.p.ResolveIndices(); err != nil { + rootT := cop.ConvertToRootTask(b.ctx) + if err := rootT.GetPlan().ResolveIndices(); err != nil { return nil, err } - return rootT.p, nil + return rootT.GetPlan(), nil } func getIndexColumnInfos(tblInfo *model.TableInfo, idx *model.IndexInfo) []*model.ColumnInfo { diff --git a/pkg/planner/core/point_get_plan.go b/pkg/planner/core/point_get_plan.go index de57206b5d4b5..0ffdda7023f87 100644 --- a/pkg/planner/core/point_get_plan.go +++ b/pkg/planner/core/point_get_plan.go @@ -15,6 +15,7 @@ package core import ( + "github.com/pingcap/tidb/pkg/planner/util/coreusage" math2 "math" "strconv" "strings" @@ -95,7 +96,7 @@ type PointGetPlan struct { // required by cost model planCostInit bool planCost float64 - planCostVer2 costVer2 + planCostVer2 coreusage.CostVer2 // accessCols represents actual columns the PointGet will access, which are used to calculate row-size accessCols []*expression.Column @@ -108,21 +109,21 @@ type PointGetPlan struct { PartitionNames []model.CIStr } -func (p *PointGetPlan) getEstRowCountForDisplay() float64 { +func (p *PointGetPlan) GetEstRowCountForDisplay() float64 { if p == nil { return 0 } return p.StatsInfo().RowCount * getEstimatedProbeCntFromProbeParents(p.probeParents) } -func (p *PointGetPlan) getActualProbeCnt(statsColl *execdetails.RuntimeStatsColl) int64 { +func (p *PointGetPlan) GetActualProbeCnt(statsColl *execdetails.RuntimeStatsColl) int64 { if p == nil { return 1 } return getActualProbeCntFromProbeParents(p.probeParents, statsColl) } -func (p *PointGetPlan) setProbeParents(probeParents 
[]PhysicalPlan) { +func (p *PointGetPlan) SetProbeParents(probeParents []PhysicalPlan) { p.probeParents = probeParents } @@ -148,9 +149,9 @@ func (p *PointGetPlan) SetCost(cost float64) { p.cost = cost } -// attach2Task makes the current physical plan as the father of task's physicalPlan and updates the cost of +// Attach2Task makes the current physical plan as the father of task's physicalPlan and updates the cost of // current task. If the child's task is cop task, some operator may close this task and return a new rootTask. -func (*PointGetPlan) attach2Task(...task) task { +func (*PointGetPlan) Attach2Task(...Task) Task { return nil } @@ -260,7 +261,7 @@ func (p *PointGetPlan) SetOutputNames(names types.NameSlice) { p.outputNames = names } -func (*PointGetPlan) appendChildCandidate(_ *physicalOptimizeOp) {} +func (*PointGetPlan) AppendChildCandidate(_ *coreusage.PhysicalOptimizeOp) {} const emptyPointGetPlanSize = int64(unsafe.Sizeof(PointGetPlan{})) @@ -434,7 +435,7 @@ type BatchPointGetPlan struct { // required by cost model planCostInit bool planCost float64 - planCostVer2 costVer2 + planCostVer2 coreusage.CostVer2 // accessCols represents actual columns the PointGet will access, which are used to calculate row-size accessCols []*expression.Column @@ -445,20 +446,20 @@ type BatchPointGetPlan struct { PartitionNames []model.CIStr } -func (p *BatchPointGetPlan) getEstRowCountForDisplay() float64 { +func (p *BatchPointGetPlan) GetEstRowCountForDisplay() float64 { if p == nil { return 0 } return p.StatsInfo().RowCount * getEstimatedProbeCntFromProbeParents(p.probeParents) } -func (p *BatchPointGetPlan) getActualProbeCnt(statsColl *execdetails.RuntimeStatsColl) int64 { +func (p *BatchPointGetPlan) GetActualProbeCnt(statsColl *execdetails.RuntimeStatsColl) int64 { if p == nil { return 1 } return getActualProbeCntFromProbeParents(p.probeParents, statsColl) } -func (p *BatchPointGetPlan) setProbeParents(probeParents []PhysicalPlan) { +func (p *BatchPointGetPlan) 
SetProbeParents(probeParents []PhysicalPlan) { p.probeParents = probeParents } @@ -482,9 +483,9 @@ func (*BatchPointGetPlan) ExtractCorrelatedCols() []*expression.CorrelatedColumn return nil } -// attach2Task makes the current physical plan as the father of task's physicalPlan and updates the cost of +// Attach2Task makes the current physical plan as the father of task's physicalPlan and updates the cost of // current task. If the child's task is cop task, some operator may close this task and return a new rootTask. -func (*BatchPointGetPlan) attach2Task(...task) task { +func (*BatchPointGetPlan) Attach2Task(...Task) Task { return nil } @@ -571,7 +572,7 @@ func (p *BatchPointGetPlan) SetOutputNames(names types.NameSlice) { p.names = names } -func (*BatchPointGetPlan) appendChildCandidate(_ *physicalOptimizeOp) {} +func (*BatchPointGetPlan) AppendChildCandidate(_ *coreusage.PhysicalOptimizeOp) {} const emptyBatchPointGetPlanSize = int64(unsafe.Sizeof(BatchPointGetPlan{})) diff --git a/pkg/planner/core/rule_aggregation_elimination.go b/pkg/planner/core/rule_aggregation_elimination.go index 0670a1dca0ec5..70e31c18fdc49 100644 --- a/pkg/planner/core/rule_aggregation_elimination.go +++ b/pkg/planner/core/rule_aggregation_elimination.go @@ -17,13 +17,13 @@ package core import ( "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "math" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/pingcap/tidb/pkg/planner/util" "github.com/pingcap/tidb/pkg/types" ) @@ -48,7 +48,7 @@ type aggregationEliminateChecker struct { // e.g. select min(b) from t group by a. If a is a unique key, then this sql is equal to `select b from t group by a`. // For count(expr), sum(expr), avg(expr), count(distinct expr, [expr...]) we may need to rewrite the expr. Details are shown below. 
// If we can eliminate agg successful, we return a projection. Else we return a nil pointer. -func (a *aggregationEliminateChecker) tryToEliminateAggregation(agg *LogicalAggregation, opt *util.LogicalOptimizeOp) *LogicalProjection { +func (a *aggregationEliminateChecker) tryToEliminateAggregation(agg *LogicalAggregation, opt *coreusage.LogicalOptimizeOp) *LogicalProjection { for _, af := range agg.AggFuncs { // TODO(issue #9968): Actually, we can rewrite GROUP_CONCAT when all the // arguments it accepts are promised to be NOT-NULL. @@ -89,7 +89,7 @@ func (a *aggregationEliminateChecker) tryToEliminateAggregation(agg *LogicalAggr // tryToEliminateDistinct will eliminate distinct in the aggregation function if the aggregation args // have unique key column. see detail example in https://github.com/pingcap/tidb/issues/23436 -func (*aggregationEliminateChecker) tryToEliminateDistinct(agg *LogicalAggregation, opt *util.LogicalOptimizeOp) { +func (*aggregationEliminateChecker) tryToEliminateDistinct(agg *LogicalAggregation, opt *coreusage.LogicalOptimizeOp) { for _, af := range agg.AggFuncs { if af.HasDistinct { cols := make([]*expression.Column, 0, len(af.Args)) @@ -129,7 +129,7 @@ func (*aggregationEliminateChecker) tryToEliminateDistinct(agg *LogicalAggregati } } -func appendAggregationEliminateTraceStep(agg *LogicalAggregation, proj *LogicalProjection, uniqueKey expression.KeyInfo, opt *util.LogicalOptimizeOp) { +func appendAggregationEliminateTraceStep(agg *LogicalAggregation, proj *LogicalProjection, uniqueKey expression.KeyInfo, opt *coreusage.LogicalOptimizeOp) { reason := func() string { return fmt.Sprintf("%s is a unique key", uniqueKey.String()) } @@ -141,7 +141,7 @@ func appendAggregationEliminateTraceStep(agg *LogicalAggregation, proj *LogicalP } func appendDistinctEliminateTraceStep(agg *LogicalAggregation, uniqueKey expression.KeyInfo, af *aggregation.AggFuncDesc, - opt *util.LogicalOptimizeOp) { + opt *coreusage.LogicalOptimizeOp) { reason := func() 
string { return fmt.Sprintf("%s is a unique key", uniqueKey.String()) } @@ -254,7 +254,7 @@ func wrapCastFunction(ctx expression.BuildContext, arg expression.Expression, ta return expression.BuildCastFunction(ctx, arg, targetTp) } -func (a *aggregationEliminator) optimize(ctx context.Context, p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (a *aggregationEliminator) optimize(ctx context.Context, p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false newChildren := make([]LogicalPlan, 0, len(p.Children())) for _, child := range p.Children() { diff --git a/pkg/planner/core/rule_aggregation_push_down.go b/pkg/planner/core/rule_aggregation_push_down.go index 1191097ab55e9..cf056f745abc7 100644 --- a/pkg/planner/core/rule_aggregation_push_down.go +++ b/pkg/planner/core/rule_aggregation_push_down.go @@ -18,6 +18,7 @@ import ( "bytes" "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" @@ -248,7 +249,7 @@ func (*aggregationPushDownSolver) decompose(ctx PlanContext, aggFunc *aggregatio // process it temporarily. If not, We will add additional group by columns and first row functions. We make a new aggregation operator. // If the pushed aggregation is grouped by unique key, it's no need to push it down. 
func (a *aggregationPushDownSolver) tryToPushDownAgg(oldAgg *LogicalAggregation, aggFuncs []*aggregation.AggFuncDesc, gbyCols []*expression.Column, - join *LogicalJoin, childIdx int, blockOffset int, opt *util.LogicalOptimizeOp) (_ LogicalPlan, err error) { + join *LogicalJoin, childIdx int, blockOffset int, opt *coreusage.LogicalOptimizeOp) (_ LogicalPlan, err error) { child := join.children[childIdx] if aggregation.IsAllFirstRow(aggFuncs) { return child, nil @@ -433,13 +434,13 @@ func (*aggregationPushDownSolver) pushAggCrossUnion(agg *LogicalAggregation, uni return newAgg, nil } -func (a *aggregationPushDownSolver) optimize(_ context.Context, p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (a *aggregationPushDownSolver) optimize(_ context.Context, p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false newLogicalPlan, err := a.aggPushDown(p, opt) return newLogicalPlan, planChanged, err } -func (a *aggregationPushDownSolver) tryAggPushDownForUnion(union *LogicalUnionAll, agg *LogicalAggregation, opt *util.LogicalOptimizeOp) error { +func (a *aggregationPushDownSolver) tryAggPushDownForUnion(union *LogicalUnionAll, agg *LogicalAggregation, opt *coreusage.LogicalOptimizeOp) error { for _, aggFunc := range agg.AggFuncs { if !a.isDecomposableWithUnion(aggFunc) { return nil @@ -474,7 +475,7 @@ func (a *aggregationPushDownSolver) tryAggPushDownForUnion(union *LogicalUnionAl } // aggPushDown tries to push down aggregate functions to join paths. 
-func (a *aggregationPushDownSolver) aggPushDown(p LogicalPlan, opt *util.LogicalOptimizeOp) (_ LogicalPlan, err error) { +func (a *aggregationPushDownSolver) aggPushDown(p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (_ LogicalPlan, err error) { if agg, ok := p.(*LogicalAggregation); ok { proj := a.tryToEliminateAggregation(agg, opt) if proj != nil { @@ -683,7 +684,7 @@ func (*aggregationPushDownSolver) name() string { } func appendAggPushDownAcrossJoinTraceStep(oldAgg, newAgg *LogicalAggregation, aggFuncs []*aggregation.AggFuncDesc, join *LogicalJoin, - childIdx int, opt *util.LogicalOptimizeOp) { + childIdx int, opt *coreusage.LogicalOptimizeOp) { reason := func() string { buffer := bytes.NewBufferString(fmt.Sprintf("%v_%v's functions[", oldAgg.TP(), oldAgg.ID())) for i, aggFunc := range aggFuncs { @@ -708,7 +709,7 @@ func appendAggPushDownAcrossJoinTraceStep(oldAgg, newAgg *LogicalAggregation, ag opt.AppendStepToCurrent(join.ID(), join.TP(), reason, action) } -func appendAggPushDownAcrossProjTraceStep(agg *LogicalAggregation, proj *LogicalProjection, opt *util.LogicalOptimizeOp) { +func appendAggPushDownAcrossProjTraceStep(agg *LogicalAggregation, proj *LogicalProjection, opt *coreusage.LogicalOptimizeOp) { action := func() string { buffer := bytes.NewBufferString(fmt.Sprintf("%v_%v is eliminated, and %v_%v's functions changed into[", proj.TP(), proj.ID(), agg.TP(), agg.ID())) for i, aggFunc := range agg.AggFuncs { @@ -726,7 +727,7 @@ func appendAggPushDownAcrossProjTraceStep(agg *LogicalAggregation, proj *Logical opt.AppendStepToCurrent(agg.ID(), agg.TP(), reason, action) } -func appendAggPushDownAcrossUnionTraceStep(union *LogicalUnionAll, agg *LogicalAggregation, opt *util.LogicalOptimizeOp) { +func appendAggPushDownAcrossUnionTraceStep(union *LogicalUnionAll, agg *LogicalAggregation, opt *coreusage.LogicalOptimizeOp) { reason := func() string { buffer := bytes.NewBufferString(fmt.Sprintf("%v_%v functions[", agg.TP(), agg.ID())) for i, aggFunc := range 
agg.AggFuncs { diff --git a/pkg/planner/core/rule_aggregation_skew_rewrite.go b/pkg/planner/core/rule_aggregation_skew_rewrite.go index 46c251d4bd544..9bd8f29b1a4c7 100644 --- a/pkg/planner/core/rule_aggregation_skew_rewrite.go +++ b/pkg/planner/core/rule_aggregation_skew_rewrite.go @@ -17,11 +17,11 @@ package core import ( "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/planner/util" "github.com/pingcap/tidb/pkg/util/intset" ) @@ -47,7 +47,7 @@ type skewDistinctAggRewriter struct { // - The aggregate has 1 and only 1 distinct aggregate function (limited to count, avg, sum) // // This rule is disabled by default. Use tidb_opt_skew_distinct_agg to enable the rule. -func (a *skewDistinctAggRewriter) rewriteSkewDistinctAgg(agg *LogicalAggregation, opt *util.LogicalOptimizeOp) LogicalPlan { +func (a *skewDistinctAggRewriter) rewriteSkewDistinctAgg(agg *LogicalAggregation, opt *coreusage.LogicalOptimizeOp) LogicalPlan { // only group aggregate is applicable if len(agg.GroupByItems) == 0 { return nil @@ -263,7 +263,7 @@ func (*skewDistinctAggRewriter) isQualifiedAgg(aggFunc *aggregation.AggFuncDesc) } } -func appendSkewDistinctAggRewriteTraceStep(agg *LogicalAggregation, result LogicalPlan, opt *util.LogicalOptimizeOp) { +func appendSkewDistinctAggRewriteTraceStep(agg *LogicalAggregation, result LogicalPlan, opt *coreusage.LogicalOptimizeOp) { reason := func() string { return fmt.Sprintf("%v_%v has a distinct agg function", agg.TP(), agg.ID()) } @@ -274,7 +274,7 @@ func appendSkewDistinctAggRewriteTraceStep(agg *LogicalAggregation, result Logic opt.AppendStepToCurrent(agg.ID(), agg.TP(), reason, action) } -func (a *skewDistinctAggRewriter) optimize(ctx context.Context, p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (a *skewDistinctAggRewriter) 
optimize(ctx context.Context, p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false newChildren := make([]LogicalPlan, 0, len(p.Children())) for _, child := range p.Children() { diff --git a/pkg/planner/core/rule_build_key_info.go b/pkg/planner/core/rule_build_key_info.go index 74ea9187a8674..b6c25155b7ddb 100644 --- a/pkg/planner/core/rule_build_key_info.go +++ b/pkg/planner/core/rule_build_key_info.go @@ -16,17 +16,17 @@ package core import ( "context" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/pingcap/tidb/pkg/planner/util" ) type buildKeySolver struct{} -func (*buildKeySolver) optimize(_ context.Context, p LogicalPlan, _ *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (*buildKeySolver) optimize(_ context.Context, p LogicalPlan, _ *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false buildKeyInfo(p) return p, planChanged, nil diff --git a/pkg/planner/core/rule_collect_plan_stats.go b/pkg/planner/core/rule_collect_plan_stats.go index 027522a111d7f..53fa8aec89958 100644 --- a/pkg/planner/core/rule_collect_plan_stats.go +++ b/pkg/planner/core/rule_collect_plan_stats.go @@ -16,13 +16,13 @@ package core import ( "context" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "time" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/parser/model" - "github.com/pingcap/tidb/pkg/planner/util" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/statistics" "github.com/pingcap/tidb/pkg/table" @@ -32,7 +32,7 @@ import ( type collectPredicateColumnsPoint struct{} -func (collectPredicateColumnsPoint) optimize(_ context.Context, plan LogicalPlan, _ *util.LogicalOptimizeOp) (LogicalPlan, 
bool, error) { +func (collectPredicateColumnsPoint) optimize(_ context.Context, plan LogicalPlan, _ *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false if plan.SCtx().GetSessionVars().InRestrictedSQL { return plan, planChanged, nil @@ -78,7 +78,7 @@ func (collectPredicateColumnsPoint) name() string { type syncWaitStatsLoadPoint struct{} -func (syncWaitStatsLoadPoint) optimize(_ context.Context, plan LogicalPlan, _ *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (syncWaitStatsLoadPoint) optimize(_ context.Context, plan LogicalPlan, _ *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false if plan.SCtx().GetSessionVars().InRestrictedSQL { return plan, planChanged, nil diff --git a/pkg/planner/core/rule_column_pruning.go b/pkg/planner/core/rule_column_pruning.go index 04d8b953f5884..d7f2f7f90e2e2 100644 --- a/pkg/planner/core/rule_column_pruning.go +++ b/pkg/planner/core/rule_column_pruning.go @@ -18,6 +18,7 @@ import ( "bytes" "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" @@ -32,7 +33,7 @@ import ( type columnPruner struct { } -func (*columnPruner) optimize(_ context.Context, lp LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (*columnPruner) optimize(_ context.Context, lp LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false lp, err := lp.PruneColumns(lp.Schema().Columns, opt) if err != nil { @@ -74,7 +75,7 @@ func exprHasSetVarOrSleep(expr expression.Expression) bool { // the level projection expressions construction is left to the last logical optimize rule) // // so when do the rule_column_pruning here, we just prune the schema is enough. 
-func (p *LogicalExpand) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalExpand) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { // Expand need those extra redundant distinct group by columns projected from underlying projection. // distinct GroupByCol must be used by aggregate above, to make sure this, append distinctGroupByCol again. parentUsedCols = append(parentUsedCols, p.distinctGroupByCol...) @@ -99,7 +100,7 @@ func (p *LogicalExpand) PruneColumns(parentUsedCols []*expression.Column, opt *u // PruneColumns implements LogicalPlan interface. // If any expression has SetVar function or Sleep function, we do not prune it. -func (p *LogicalProjection) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalProjection) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { used := expression.GetUsedList(p.SCtx().GetExprCtx(), parentUsedCols, p.schema) prunedColumns := make([]*expression.Column, 0) @@ -123,7 +124,7 @@ func (p *LogicalProjection) PruneColumns(parentUsedCols []*expression.Column, op } // PruneColumns implements LogicalPlan interface. -func (p *LogicalSelection) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalSelection) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { child := p.children[0] parentUsedCols = expression.ExtractColumnsFromExpressions(parentUsedCols, p.Conditions, nil) var err error @@ -135,7 +136,7 @@ func (p *LogicalSelection) PruneColumns(parentUsedCols []*expression.Column, opt } // PruneColumns implements LogicalPlan interface. 
-func (la *LogicalAggregation) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (la *LogicalAggregation) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { child := la.children[0] used := expression.GetUsedList(la.SCtx().GetExprCtx(), parentUsedCols, la.Schema()) prunedColumns := make([]*expression.Column, 0) @@ -228,7 +229,7 @@ func (la *LogicalAggregation) PruneColumns(parentUsedCols []*expression.Column, return la, nil } -func pruneByItems(p LogicalPlan, old []*util.ByItems, opt *util.LogicalOptimizeOp) (byItems []*util.ByItems, +func pruneByItems(p LogicalPlan, old []*util.ByItems, opt *coreusage.LogicalOptimizeOp) (byItems []*util.ByItems, parentUsedCols []*expression.Column) { prunedByItems := make([]*util.ByItems, 0) byItems = make([]*util.ByItems, 0, len(old)) @@ -262,7 +263,7 @@ func pruneByItems(p LogicalPlan, old []*util.ByItems, opt *util.LogicalOptimizeO // PruneColumns implements LogicalPlan interface. // If any expression can view as a constant in execution stage, such as correlated column, constant, // we do prune them. Note that we can't prune the expressions contain non-deterministic functions, such as rand(). -func (ls *LogicalSort) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (ls *LogicalSort) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { var cols []*expression.Column ls.ByItems, cols = pruneByItems(ls, ls.ByItems, opt) parentUsedCols = append(parentUsedCols, cols...) @@ -277,7 +278,7 @@ func (ls *LogicalSort) PruneColumns(parentUsedCols []*expression.Column, opt *ut // PruneColumns implements LogicalPlan interface. // If any expression can view as a constant in execution stage, such as correlated column, constant, // we do prune them. 
Note that we can't prune the expressions contain non-deterministic functions, such as rand(). -func (lt *LogicalTopN) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (lt *LogicalTopN) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { child := lt.children[0] var cols []*expression.Column lt.ByItems, cols = pruneByItems(lt, lt.ByItems, opt) @@ -291,7 +292,7 @@ func (lt *LogicalTopN) PruneColumns(parentUsedCols []*expression.Column, opt *ut } // PruneColumns implements LogicalPlan interface. -func (p *LogicalUnionAll) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalUnionAll) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { used := expression.GetUsedList(p.SCtx().GetExprCtx(), parentUsedCols, p.schema) hasBeenUsed := false for i := range used { @@ -346,7 +347,7 @@ func (p *LogicalUnionAll) PruneColumns(parentUsedCols []*expression.Column, opt } // PruneColumns implements LogicalPlan interface. -func (p *LogicalUnionScan) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalUnionScan) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { for i := 0; i < p.handleCols.NumCols(); i++ { parentUsedCols = append(parentUsedCols, p.handleCols.GetCol(i)) } @@ -366,7 +367,7 @@ func (p *LogicalUnionScan) PruneColumns(parentUsedCols []*expression.Column, opt } // PruneColumns implements LogicalPlan interface. 
-func (ds *DataSource) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (ds *DataSource) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { used := expression.GetUsedList(ds.SCtx().GetExprCtx(), parentUsedCols, ds.schema) exprCols := expression.ExtractColumnsFromExpressions(nil, ds.allConds, nil) @@ -418,7 +419,7 @@ func (ds *DataSource) PruneColumns(parentUsedCols []*expression.Column, opt *uti } // PruneColumns implements LogicalPlan interface. -func (p *LogicalMemTable) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalMemTable) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { switch p.TableInfo.Name.O { case infoschema.TableStatementsSummary, infoschema.TableStatementsSummaryHistory, @@ -450,7 +451,7 @@ func (p *LogicalMemTable) PruneColumns(parentUsedCols []*expression.Column, opt } // PruneColumns implements LogicalPlan interface. -func (p *LogicalTableDual) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalTableDual) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { used := expression.GetUsedList(p.SCtx().GetExprCtx(), parentUsedCols, p.Schema()) prunedColumns := make([]*expression.Column, 0) for i := len(used) - 1; i >= 0; i-- { @@ -496,7 +497,7 @@ func (p *LogicalJoin) mergeSchema() { } // PruneColumns implements LogicalPlan interface. 
-func (p *LogicalJoin) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalJoin) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { leftCols, rightCols := p.extractUsedCols(parentUsedCols) var err error @@ -522,7 +523,7 @@ func (p *LogicalJoin) PruneColumns(parentUsedCols []*expression.Column, opt *uti } // PruneColumns implements LogicalPlan interface. -func (la *LogicalApply) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (la *LogicalApply) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { leftCols, rightCols := la.extractUsedCols(parentUsedCols) allowEliminateApply := fixcontrol.GetBoolWithDefault(la.SCtx().GetSessionVars().GetOptimizerFixControlMap(), fixcontrol.Fix45822, true) var err error @@ -556,7 +557,7 @@ func (la *LogicalApply) PruneColumns(parentUsedCols []*expression.Column, opt *u } // PruneColumns implements LogicalPlan interface. -func (p *LogicalLock) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalLock) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { var err error if !IsSelectForUpdateLockType(p.Lock.LockType) { // when use .baseLogicalPlan to call the PruneColumns, it means current plan itself has @@ -589,7 +590,7 @@ func (p *LogicalLock) PruneColumns(parentUsedCols []*expression.Column, opt *uti } // PruneColumns implements LogicalPlan interface. 
-func (p *LogicalWindow) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalWindow) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { windowColumns := p.GetWindowResultColumns() cnt := 0 for _, col := range parentUsedCols { @@ -634,7 +635,7 @@ func (p *LogicalWindow) extractUsedCols(parentUsedCols []*expression.Column) []* } // PruneColumns implements LogicalPlan interface. -func (p *LogicalLimit) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalLimit) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { if len(parentUsedCols) == 0 { // happens when LIMIT appears in UPDATE. return p, nil } @@ -676,7 +677,7 @@ func addConstOneForEmptyProjection(p LogicalPlan) { }) } -func appendColumnPruneTraceStep(p LogicalPlan, prunedColumns []*expression.Column, opt *util.LogicalOptimizeOp) { +func appendColumnPruneTraceStep(p LogicalPlan, prunedColumns []*expression.Column, opt *coreusage.LogicalOptimizeOp) { if len(prunedColumns) < 1 { return } @@ -687,7 +688,7 @@ func appendColumnPruneTraceStep(p LogicalPlan, prunedColumns []*expression.Colum appendItemPruneTraceStep(p, "columns", s, opt) } -func appendFunctionPruneTraceStep(p LogicalPlan, prunedFunctions []*aggregation.AggFuncDesc, opt *util.LogicalOptimizeOp) { +func appendFunctionPruneTraceStep(p LogicalPlan, prunedFunctions []*aggregation.AggFuncDesc, opt *coreusage.LogicalOptimizeOp) { if len(prunedFunctions) < 1 { return } @@ -698,7 +699,7 @@ func appendFunctionPruneTraceStep(p LogicalPlan, prunedFunctions []*aggregation. 
appendItemPruneTraceStep(p, "aggregation functions", s, opt) } -func appendByItemsPruneTraceStep(p LogicalPlan, prunedByItems []*util.ByItems, opt *util.LogicalOptimizeOp) { +func appendByItemsPruneTraceStep(p LogicalPlan, prunedByItems []*util.ByItems, opt *coreusage.LogicalOptimizeOp) { if len(prunedByItems) < 1 { return } @@ -709,7 +710,7 @@ func appendByItemsPruneTraceStep(p LogicalPlan, prunedByItems []*util.ByItems, o appendItemPruneTraceStep(p, "byItems", s, opt) } -func appendGroupByItemsPruneTraceStep(p LogicalPlan, prunedGroupByItems []expression.Expression, opt *util.LogicalOptimizeOp) { +func appendGroupByItemsPruneTraceStep(p LogicalPlan, prunedGroupByItems []expression.Expression, opt *coreusage.LogicalOptimizeOp) { if len(prunedGroupByItems) < 1 { return } @@ -720,7 +721,7 @@ func appendGroupByItemsPruneTraceStep(p LogicalPlan, prunedGroupByItems []expres appendItemPruneTraceStep(p, "groupByItems", s, opt) } -func appendItemPruneTraceStep(p LogicalPlan, itemType string, prunedObjects []fmt.Stringer, opt *util.LogicalOptimizeOp) { +func appendItemPruneTraceStep(p LogicalPlan, itemType string, prunedObjects []fmt.Stringer, opt *coreusage.LogicalOptimizeOp) { if len(prunedObjects) < 1 { return } @@ -768,12 +769,12 @@ func preferKeyColumnFromTable(dataSource *DataSource, originColumns []*expressio // PruneColumns implements the interface of LogicalPlan. // LogicalCTE just do a empty function call. It's logical optimize is indivisual phase. -func (p *LogicalCTE) PruneColumns(_ []*expression.Column, _ *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalCTE) PruneColumns(_ []*expression.Column, _ *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { return p, nil } // PruneColumns implements the interface of LogicalPlan. 
-func (p *LogicalSequence) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalSequence) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { var err error p.children[len(p.children)-1], err = p.children[len(p.children)-1].PruneColumns(parentUsedCols, opt) if err != nil { @@ -782,7 +783,7 @@ func (p *LogicalSequence) PruneColumns(parentUsedCols []*expression.Column, opt return p, nil } -func applyEliminateTraceStep(lp LogicalPlan, opt *util.LogicalOptimizeOp) { +func applyEliminateTraceStep(lp LogicalPlan, opt *coreusage.LogicalOptimizeOp) { action := func() string { buffer := bytes.NewBufferString( fmt.Sprintf("%v_%v is eliminated.", lp.TP(), lp.ID())) diff --git a/pkg/planner/core/rule_constant_propagation.go b/pkg/planner/core/rule_constant_propagation.go index ee53f78e218f9..8ef968eb2fa1a 100644 --- a/pkg/planner/core/rule_constant_propagation.go +++ b/pkg/planner/core/rule_constant_propagation.go @@ -16,10 +16,10 @@ package core import ( "context" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/planner/util" ) // constantPropagationSolver can support constant propagated cross-query block. @@ -51,7 +51,7 @@ type constantPropagationSolver struct { // which is mainly implemented in the interface "constantPropagation" of LogicalPlan. // Currently only the Logical Join implements this function. (Used for the subquery in FROM List) // In the future, the Logical Apply will implements this function. 
(Used for the subquery in WHERE or SELECT list) -func (cp *constantPropagationSolver) optimize(_ context.Context, p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (cp *constantPropagationSolver) optimize(_ context.Context, p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false // constant propagation root plan newRoot := p.constantPropagation(nil, 0, opt) @@ -68,7 +68,7 @@ func (cp *constantPropagationSolver) optimize(_ context.Context, p LogicalPlan, } // execOptimize optimize constant propagation exclude root plan node -func (cp *constantPropagationSolver) execOptimize(currentPlan LogicalPlan, parentPlan LogicalPlan, currentChildIdx int, opt *util.LogicalOptimizeOp) { +func (cp *constantPropagationSolver) execOptimize(currentPlan LogicalPlan, parentPlan LogicalPlan, currentChildIdx int, opt *coreusage.LogicalOptimizeOp) { if parentPlan == nil { // Attention: The function 'execOptimize' could not handle the root plan, so the parent plan could not be nil. 
return @@ -85,7 +85,7 @@ func (*constantPropagationSolver) name() string { return "constant_propagation" } -func (*baseLogicalPlan) constantPropagation(_ LogicalPlan, _ int, _ *util.LogicalOptimizeOp) (newRoot LogicalPlan) { +func (*baseLogicalPlan) constantPropagation(_ LogicalPlan, _ int, _ *coreusage.LogicalOptimizeOp) (newRoot LogicalPlan) { // Only LogicalJoin can apply constant propagation // Other Logical plan do nothing return nil @@ -143,7 +143,7 @@ func (*baseLogicalPlan) constantPropagation(_ LogicalPlan, _ int, _ *util.Logica */ // Return nil if the root of plan has not been changed // Return new root if the root of plan is changed to selection -func (logicalJoin *LogicalJoin) constantPropagation(parentPlan LogicalPlan, currentChildIdx int, opt *util.LogicalOptimizeOp) (newRoot LogicalPlan) { +func (logicalJoin *LogicalJoin) constantPropagation(parentPlan LogicalPlan, currentChildIdx int, opt *coreusage.LogicalOptimizeOp) (newRoot LogicalPlan) { // step1: get constant predicate from left or right according to the JoinType var getConstantPredicateFromLeft bool var getConstantPredicateFromRight bool @@ -268,7 +268,7 @@ func validCompareConstantPredicate(candidatePredicate expression.Expression) boo // If the currentPlan at the top of query plan, return new root plan (selection) // Else return nil func addCandidateSelection(currentPlan LogicalPlan, currentChildIdx int, parentPlan LogicalPlan, - candidatePredicates []expression.Expression, opt *util.LogicalOptimizeOp) (newRoot LogicalPlan) { + candidatePredicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) (newRoot LogicalPlan) { // generate a new selection for candidatePredicates selection := LogicalSelection{Conditions: candidatePredicates}.Init(currentPlan.SCtx(), currentPlan.QueryBlockOffset()) // add selection above of p diff --git a/pkg/planner/core/rule_decorrelate.go b/pkg/planner/core/rule_decorrelate.go index 48450912d1757..e27ea34618b65 100644 --- 
a/pkg/planner/core/rule_decorrelate.go +++ b/pkg/planner/core/rule_decorrelate.go @@ -18,13 +18,13 @@ import ( "bytes" "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "math" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/pingcap/tidb/pkg/planner/util" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/plancodec" ) @@ -194,7 +194,7 @@ func (*decorrelateSolver) aggDefaultValueMap(agg *LogicalAggregation) map[int]*e } // optimize implements logicalOptRule interface. -func (s *decorrelateSolver) optimize(ctx context.Context, p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (s *decorrelateSolver) optimize(ctx context.Context, p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false if apply, ok := p.(*LogicalApply); ok { outerPlan := apply.children[0] @@ -464,7 +464,7 @@ func (*decorrelateSolver) name() string { return "decorrelate" } -func appendApplySimplifiedTraceStep(p *LogicalApply, j *LogicalJoin, opt *util.LogicalOptimizeOp) { +func appendApplySimplifiedTraceStep(p *LogicalApply, j *LogicalJoin, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v simplified into %v_%v", plancodec.TypeApply, p.ID(), plancodec.TypeJoin, j.ID()) } @@ -474,7 +474,7 @@ func appendApplySimplifiedTraceStep(p *LogicalApply, j *LogicalJoin, opt *util.L opt.AppendStepToCurrent(p.ID(), p.TP(), reason, action) } -func appendRemoveSelectionTraceStep(p LogicalPlan, s *LogicalSelection, opt *util.LogicalOptimizeOp) { +func appendRemoveSelectionTraceStep(p LogicalPlan, s *LogicalSelection, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v removed from plan tree", s.TP(), s.ID()) } @@ -484,7 +484,7 @@ func appendRemoveSelectionTraceStep(p LogicalPlan, s 
*LogicalSelection, opt *uti opt.AppendStepToCurrent(s.ID(), s.TP(), reason, action) } -func appendRemoveMaxOneRowTraceStep(m *LogicalMaxOneRow, opt *util.LogicalOptimizeOp) { +func appendRemoveMaxOneRowTraceStep(m *LogicalMaxOneRow, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v removed from plan tree", m.TP(), m.ID()) } @@ -494,7 +494,7 @@ func appendRemoveMaxOneRowTraceStep(m *LogicalMaxOneRow, opt *util.LogicalOptimi opt.AppendStepToCurrent(m.ID(), m.TP(), reason, action) } -func appendRemoveLimitTraceStep(limit *LogicalLimit, opt *util.LogicalOptimizeOp) { +func appendRemoveLimitTraceStep(limit *LogicalLimit, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v removed from plan tree", limit.TP(), limit.ID()) } @@ -504,7 +504,7 @@ func appendRemoveLimitTraceStep(limit *LogicalLimit, opt *util.LogicalOptimizeOp opt.AppendStepToCurrent(limit.ID(), limit.TP(), reason, action) } -func appendRemoveProjTraceStep(p *LogicalApply, proj *LogicalProjection, opt *util.LogicalOptimizeOp) { +func appendRemoveProjTraceStep(p *LogicalApply, proj *LogicalProjection, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v removed from plan tree", proj.TP(), proj.ID()) } @@ -514,7 +514,7 @@ func appendRemoveProjTraceStep(p *LogicalApply, proj *LogicalProjection, opt *ut opt.AppendStepToCurrent(proj.ID(), proj.TP(), reason, action) } -func appendMoveProjTraceStep(p *LogicalApply, np LogicalPlan, proj *LogicalProjection, opt *util.LogicalOptimizeOp) { +func appendMoveProjTraceStep(p *LogicalApply, np LogicalPlan, proj *LogicalProjection, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v is moved as %v_%v's parent", proj.TP(), proj.ID(), np.TP(), np.ID()) } @@ -524,7 +524,7 @@ func appendMoveProjTraceStep(p *LogicalApply, np LogicalPlan, proj *LogicalProje opt.AppendStepToCurrent(proj.ID(), proj.TP(), reason, action) } -func 
appendRemoveSortTraceStep(sort *LogicalSort, opt *util.LogicalOptimizeOp) { +func appendRemoveSortTraceStep(sort *LogicalSort, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v removed from plan tree", sort.TP(), sort.ID()) } @@ -534,7 +534,7 @@ func appendRemoveSortTraceStep(sort *LogicalSort, opt *util.LogicalOptimizeOp) { opt.AppendStepToCurrent(sort.ID(), sort.TP(), reason, action) } -func appendPullUpAggTraceStep(p *LogicalApply, np LogicalPlan, agg *LogicalAggregation, opt *util.LogicalOptimizeOp) { +func appendPullUpAggTraceStep(p *LogicalApply, np LogicalPlan, agg *LogicalAggregation, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v pulled up as %v_%v's parent, and %v_%v's join type becomes %v", agg.TP(), agg.ID(), np.TP(), np.ID(), p.TP(), p.ID(), p.JoinType.String()) @@ -546,7 +546,7 @@ func appendPullUpAggTraceStep(p *LogicalApply, np LogicalPlan, agg *LogicalAggre opt.AppendStepToCurrent(agg.ID(), agg.TP(), reason, action) } -func appendAddProjTraceStep(p *LogicalApply, proj *LogicalProjection, opt *util.LogicalOptimizeOp) { +func appendAddProjTraceStep(p *LogicalApply, proj *LogicalProjection, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v is added as %v_%v's parent", proj.TP(), proj.ID(), p.TP(), p.ID()) } @@ -558,7 +558,7 @@ func appendAddProjTraceStep(p *LogicalApply, proj *LogicalProjection, opt *util. 
func appendModifyAggTraceStep(outerPlan LogicalPlan, p *LogicalApply, agg *LogicalAggregation, sel *LogicalSelection, appendedGroupByCols *expression.Schema, appendedAggFuncs []*aggregation.AggFuncDesc, - eqCondWithCorCol []*expression.ScalarFunction, opt *util.LogicalOptimizeOp) { + eqCondWithCorCol []*expression.ScalarFunction, opt *coreusage.LogicalOptimizeOp) { action := func() string { buffer := bytes.NewBufferString(fmt.Sprintf("%v_%v's groupby items added [", agg.TP(), agg.ID())) for i, col := range appendedGroupByCols.Columns { diff --git a/pkg/planner/core/rule_derive_topn_from_window.go b/pkg/planner/core/rule_derive_topn_from_window.go index 869de3d066c46..4a52cd52f1af7 100644 --- a/pkg/planner/core/rule_derive_topn_from_window.go +++ b/pkg/planner/core/rule_derive_topn_from_window.go @@ -17,6 +17,7 @@ package core import ( "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" @@ -28,7 +29,7 @@ import ( type deriveTopNFromWindow struct { } -func appendDerivedTopNTrace(topN LogicalPlan, opt *util.LogicalOptimizeOp) { +func appendDerivedTopNTrace(topN LogicalPlan, opt *coreusage.LogicalOptimizeOp) { child := topN.Children()[0] action := func() string { return fmt.Sprintf("%v_%v top N added below %v_%v ", topN.TP(), topN.ID(), child.TP(), child.ID()) @@ -116,12 +117,12 @@ func windowIsTopN(p *LogicalSelection) (bool, uint64) { return false, 0 } -func (*deriveTopNFromWindow) optimize(_ context.Context, p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (*deriveTopNFromWindow) optimize(_ context.Context, p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false return p.deriveTopN(opt), planChanged, nil } -func (s *baseLogicalPlan) deriveTopN(opt *util.LogicalOptimizeOp) LogicalPlan { +func (s *baseLogicalPlan) deriveTopN(opt *coreusage.LogicalOptimizeOp) LogicalPlan { p := s.self if 
p.SCtx().GetSessionVars().AllowDeriveTopN { for i, child := range p.Children() { @@ -132,7 +133,7 @@ func (s *baseLogicalPlan) deriveTopN(opt *util.LogicalOptimizeOp) LogicalPlan { return p } -func (s *LogicalSelection) deriveTopN(opt *util.LogicalOptimizeOp) LogicalPlan { +func (s *LogicalSelection) deriveTopN(opt *coreusage.LogicalOptimizeOp) LogicalPlan { p := s.self.(*LogicalSelection) windowIsTopN, limitValue := windowIsTopN(p) if windowIsTopN { diff --git a/pkg/planner/core/rule_eliminate_projection.go b/pkg/planner/core/rule_eliminate_projection.go index f821ac440154e..98d1b40db3f21 100644 --- a/pkg/planner/core/rule_eliminate_projection.go +++ b/pkg/planner/core/rule_eliminate_projection.go @@ -18,12 +18,12 @@ import ( "bytes" "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/pingcap/tidb/pkg/planner/util" ) // canProjectionBeEliminatedLoose checks whether a projection can be eliminated, @@ -168,14 +168,14 @@ type projectionEliminator struct { } // optimize implements the logicalOptRule interface. -func (pe *projectionEliminator) optimize(_ context.Context, lp LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (pe *projectionEliminator) optimize(_ context.Context, lp LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false root := pe.eliminate(lp, make(map[string]*expression.Column), false, opt) return root, planChanged, nil } // eliminate eliminates the redundant projection in a logical plan. 
-func (pe *projectionEliminator) eliminate(p LogicalPlan, replace map[string]*expression.Column, canEliminate bool, opt *util.LogicalOptimizeOp) LogicalPlan { +func (pe *projectionEliminator) eliminate(p LogicalPlan, replace map[string]*expression.Column, canEliminate bool, opt *coreusage.LogicalOptimizeOp) LogicalPlan { // LogicalCTE's logical optimization is independent. if _, ok := p.(*LogicalCTE); ok { return p @@ -339,7 +339,7 @@ func (*projectionEliminator) name() string { return "projection_eliminate" } -func appendDupProjEliminateTraceStep(parent, child *LogicalProjection, opt *util.LogicalOptimizeOp) { +func appendDupProjEliminateTraceStep(parent, child *LogicalProjection, opt *coreusage.LogicalOptimizeOp) { action := func() string { buffer := bytes.NewBufferString( fmt.Sprintf("%v_%v is eliminated, %v_%v's expressions changed into[", child.TP(), child.ID(), parent.TP(), parent.ID())) @@ -358,7 +358,7 @@ func appendDupProjEliminateTraceStep(parent, child *LogicalProjection, opt *util opt.AppendStepToCurrent(child.ID(), child.TP(), reason, action) } -func appendProjEliminateTraceStep(proj *LogicalProjection, opt *util.LogicalOptimizeOp) { +func appendProjEliminateTraceStep(proj *LogicalProjection, opt *coreusage.LogicalOptimizeOp) { reason := func() string { return fmt.Sprintf("%v_%v's Exprs are all Columns", proj.TP(), proj.ID()) } diff --git a/pkg/planner/core/rule_generate_column_substitute.go b/pkg/planner/core/rule_generate_column_substitute.go index cea9a393798aa..2ced26f743b2b 100644 --- a/pkg/planner/core/rule_generate_column_substitute.go +++ b/pkg/planner/core/rule_generate_column_substitute.go @@ -17,10 +17,10 @@ package core import ( "bytes" "context" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/planner/util" "github.com/pingcap/tidb/pkg/types" h "github.com/pingcap/tidb/pkg/util/hint" ) @@ -38,7 +38,7 @@ type ExprColumnMap 
map[expression.Expression]*expression.Column // For example: select a+1 from t order by a+1, with a virtual generate column c as (a+1) and // an index on c. We need to replace a+1 with c so that we can use the index on c. // See also https://dev.mysql.com/doc/refman/8.0/en/generated-column-index-optimizations.html -func (gc *gcSubstituter) optimize(ctx context.Context, lp LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (gc *gcSubstituter) optimize(ctx context.Context, lp LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false exprToColumn := make(ExprColumnMap) collectGenerateColumn(lp, exprToColumn) @@ -85,7 +85,7 @@ func collectGenerateColumn(lp LogicalPlan, exprToColumn ExprColumnMap) { } } -func tryToSubstituteExpr(expr *expression.Expression, lp LogicalPlan, candidateExpr expression.Expression, tp types.EvalType, schema *expression.Schema, col *expression.Column, opt *util.LogicalOptimizeOp) bool { +func tryToSubstituteExpr(expr *expression.Expression, lp LogicalPlan, candidateExpr expression.Expression, tp types.EvalType, schema *expression.Schema, col *expression.Column, opt *coreusage.LogicalOptimizeOp) bool { changed := false if (*expr).Equal(lp.SCtx().GetExprCtx(), candidateExpr) && candidateExpr.GetType().EvalType() == tp && schema.ColumnIndex(col) != -1 { @@ -96,7 +96,7 @@ func tryToSubstituteExpr(expr *expression.Expression, lp LogicalPlan, candidateE return changed } -func appendSubstituteColumnStep(lp LogicalPlan, candidateExpr expression.Expression, col *expression.Column, opt *util.LogicalOptimizeOp) { +func appendSubstituteColumnStep(lp LogicalPlan, candidateExpr expression.Expression, col *expression.Column, opt *coreusage.LogicalOptimizeOp) { reason := func() string { return "" } action := func() string { buffer := bytes.NewBufferString("expression:") @@ -110,11 +110,11 @@ func appendSubstituteColumnStep(lp LogicalPlan, candidateExpr expression.Express } // 
SubstituteExpression is Exported for bench -func SubstituteExpression(cond expression.Expression, lp LogicalPlan, exprToColumn ExprColumnMap, schema *expression.Schema, opt *util.LogicalOptimizeOp) bool { +func SubstituteExpression(cond expression.Expression, lp LogicalPlan, exprToColumn ExprColumnMap, schema *expression.Schema, opt *coreusage.LogicalOptimizeOp) bool { return substituteExpression(cond, lp, exprToColumn, schema, opt) } -func substituteExpression(cond expression.Expression, lp LogicalPlan, exprToColumn ExprColumnMap, schema *expression.Schema, opt *util.LogicalOptimizeOp) bool { +func substituteExpression(cond expression.Expression, lp LogicalPlan, exprToColumn ExprColumnMap, schema *expression.Schema, opt *coreusage.LogicalOptimizeOp) bool { sf, ok := cond.(*expression.ScalarFunction) if !ok { return false @@ -173,7 +173,7 @@ func substituteExpression(cond expression.Expression, lp LogicalPlan, exprToColu return changed } -func (gc *gcSubstituter) substitute(ctx context.Context, lp LogicalPlan, exprToColumn ExprColumnMap, opt *util.LogicalOptimizeOp) LogicalPlan { +func (gc *gcSubstituter) substitute(ctx context.Context, lp LogicalPlan, exprToColumn ExprColumnMap, opt *coreusage.LogicalOptimizeOp) LogicalPlan { var tp types.EvalType switch x := lp.(type) { case *LogicalSelection: diff --git a/pkg/planner/core/rule_join_elimination.go b/pkg/planner/core/rule_join_elimination.go index 2a71714a78a48..b9476394230bf 100644 --- a/pkg/planner/core/rule_join_elimination.go +++ b/pkg/planner/core/rule_join_elimination.go @@ -18,10 +18,10 @@ import ( "bytes" "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/planner/util" "github.com/pingcap/tidb/pkg/util/set" ) @@ -35,7 +35,7 @@ type outerJoinEliminator struct { // 2. outer join elimination with duplicate agnostic aggregate functions: For example left outer join. 
// If the parent only use the columns from left table with 'distinct' label. The left outer join can // be eliminated. -func (o *outerJoinEliminator) tryToEliminateOuterJoin(p *LogicalJoin, aggCols []*expression.Column, parentCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (o *outerJoinEliminator) tryToEliminateOuterJoin(p *LogicalJoin, aggCols []*expression.Column, parentCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { var innerChildIdx int switch p.JoinType { case LeftOuterJoin: @@ -192,7 +192,7 @@ func GetDupAgnosticAggCols( return true, newAggCols } -func (o *outerJoinEliminator) doOptimize(p LogicalPlan, aggCols []*expression.Column, parentCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (o *outerJoinEliminator) doOptimize(p LogicalPlan, aggCols []*expression.Column, parentCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { // CTE's logical optimization is independent. 
if _, ok := p.(*LogicalCTE); ok { return p, nil @@ -246,7 +246,7 @@ func (o *outerJoinEliminator) doOptimize(p LogicalPlan, aggCols []*expression.Co return p, nil } -func (o *outerJoinEliminator) optimize(_ context.Context, p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (o *outerJoinEliminator) optimize(_ context.Context, p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false p, err := o.doOptimize(p, nil, nil, opt) return p, planChanged, err @@ -257,7 +257,7 @@ func (*outerJoinEliminator) name() string { } func appendOuterJoinEliminateTraceStep(join *LogicalJoin, outerPlan LogicalPlan, parentCols []*expression.Column, - innerJoinKeys *expression.Schema, opt *util.LogicalOptimizeOp) { + innerJoinKeys *expression.Schema, opt *coreusage.LogicalOptimizeOp) { reason := func() string { buffer := bytes.NewBufferString("The columns[") for i, col := range parentCols { @@ -282,7 +282,7 @@ func appendOuterJoinEliminateTraceStep(join *LogicalJoin, outerPlan LogicalPlan, opt.AppendStepToCurrent(join.ID(), join.TP(), reason, action) } -func appendOuterJoinEliminateAggregationTraceStep(join *LogicalJoin, outerPlan LogicalPlan, aggCols []*expression.Column, opt *util.LogicalOptimizeOp) { +func appendOuterJoinEliminateAggregationTraceStep(join *LogicalJoin, outerPlan LogicalPlan, aggCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) { reason := func() string { buffer := bytes.NewBufferString("The columns[") for i, col := range aggCols { diff --git a/pkg/planner/core/rule_join_reorder.go b/pkg/planner/core/rule_join_reorder.go index 6acdb08a21381..09fd05d018405 100644 --- a/pkg/planner/core/rule_join_reorder.go +++ b/pkg/planner/core/rule_join_reorder.go @@ -18,11 +18,11 @@ import ( "bytes" "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "slices" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/planner/util" h 
"github.com/pingcap/tidb/pkg/util/hint" "github.com/pingcap/tidb/pkg/util/plancodec" "github.com/pingcap/tidb/pkg/util/tracing" @@ -223,7 +223,7 @@ type joinTypeWithExtMsg struct { outerBindCondition []expression.Expression } -func (s *joinReOrderSolver) optimize(_ context.Context, p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (s *joinReOrderSolver) optimize(_ context.Context, p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false tracer := &joinReorderTrace{cost: map[string]float64{}, opt: opt} tracer.traceJoinReorder(p) @@ -663,7 +663,7 @@ func (*joinReOrderSolver) name() string { return "join_reorder" } -func appendJoinReorderTraceStep(tracer *joinReorderTrace, plan LogicalPlan, opt *util.LogicalOptimizeOp) { +func appendJoinReorderTraceStep(tracer *joinReorderTrace, plan LogicalPlan, opt *coreusage.LogicalOptimizeOp) { if len(tracer.initial) < 1 || len(tracer.final) < 1 { return } @@ -773,7 +773,7 @@ func findRoots(t *tracing.PlanTrace) []*tracing.PlanTrace { } type joinReorderTrace struct { - opt *util.LogicalOptimizeOp + opt *coreusage.LogicalOptimizeOp initial string final string cost map[string]float64 diff --git a/pkg/planner/core/rule_max_min_eliminate.go b/pkg/planner/core/rule_max_min_eliminate.go index 9f523d469ccb7..329020d77c13d 100644 --- a/pkg/planner/core/rule_max_min_eliminate.go +++ b/pkg/planner/core/rule_max_min_eliminate.go @@ -18,6 +18,7 @@ import ( "bytes" "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" @@ -36,13 +37,13 @@ import ( type maxMinEliminator struct { } -func (a *maxMinEliminator) optimize(_ context.Context, p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (a *maxMinEliminator) optimize(_ context.Context, p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false return 
a.eliminateMaxMin(p, opt), planChanged, nil } // composeAggsByInnerJoin composes the scalar aggregations by cartesianJoin. -func (*maxMinEliminator) composeAggsByInnerJoin(originAgg *LogicalAggregation, aggs []*LogicalAggregation, opt *util.LogicalOptimizeOp) (plan LogicalPlan) { +func (*maxMinEliminator) composeAggsByInnerJoin(originAgg *LogicalAggregation, aggs []*LogicalAggregation, opt *coreusage.LogicalOptimizeOp) (plan LogicalPlan) { plan = aggs[0] sctx := plan.SCtx() joins := make([]*LogicalJoin, 0) @@ -138,7 +139,7 @@ func (a *maxMinEliminator) cloneSubPlans(plan LogicalPlan) LogicalPlan { // `select max(a) from t` + `select min(a) from t` + `select max(b) from t`. // Then we check whether `a` and `b` have indices. If any of the used column has no index, we cannot eliminate // this aggregation. -func (a *maxMinEliminator) splitAggFuncAndCheckIndices(agg *LogicalAggregation, opt *util.LogicalOptimizeOp) (aggs []*LogicalAggregation, canEliminate bool) { +func (a *maxMinEliminator) splitAggFuncAndCheckIndices(agg *LogicalAggregation, opt *coreusage.LogicalOptimizeOp) (aggs []*LogicalAggregation, canEliminate bool) { for _, f := range agg.AggFuncs { // We must make sure the args of max/min is a simple single column. col, ok := f.Args[0].(*expression.Column) @@ -170,7 +171,7 @@ func (a *maxMinEliminator) splitAggFuncAndCheckIndices(agg *LogicalAggregation, } // eliminateSingleMaxMin tries to convert a single max/min to Limit+Sort operators. -func (*maxMinEliminator) eliminateSingleMaxMin(agg *LogicalAggregation, opt *util.LogicalOptimizeOp) *LogicalAggregation { +func (*maxMinEliminator) eliminateSingleMaxMin(agg *LogicalAggregation, opt *coreusage.LogicalOptimizeOp) *LogicalAggregation { f := agg.AggFuncs[0] child := agg.Children()[0] ctx := agg.SCtx() @@ -211,7 +212,7 @@ func (*maxMinEliminator) eliminateSingleMaxMin(agg *LogicalAggregation, opt *uti } // eliminateMaxMin tries to convert max/min to Limit+Sort operators. 
-func (a *maxMinEliminator) eliminateMaxMin(p LogicalPlan, opt *util.LogicalOptimizeOp) LogicalPlan { +func (a *maxMinEliminator) eliminateMaxMin(p LogicalPlan, opt *coreusage.LogicalOptimizeOp) LogicalPlan { // CTE's logical optimization is indenpent. if _, ok := p.(*LogicalCTE); ok { return p @@ -261,7 +262,7 @@ func (*maxMinEliminator) name() string { return "max_min_eliminate" } -func appendEliminateSingleMaxMinTrace(agg *LogicalAggregation, sel *LogicalSelection, sort *LogicalSort, limit *LogicalLimit, opt *util.LogicalOptimizeOp) { +func appendEliminateSingleMaxMinTrace(agg *LogicalAggregation, sel *LogicalSelection, sort *LogicalSort, limit *LogicalLimit, opt *coreusage.LogicalOptimizeOp) { action := func() string { buffer := bytes.NewBufferString("") if sel != nil { @@ -286,7 +287,7 @@ func appendEliminateSingleMaxMinTrace(agg *LogicalAggregation, sel *LogicalSelec opt.AppendStepToCurrent(agg.ID(), agg.TP(), reason, action) } -func appendEliminateMultiMinMaxTraceStep(originAgg *LogicalAggregation, aggs []*LogicalAggregation, joins []*LogicalJoin, opt *util.LogicalOptimizeOp) { +func appendEliminateMultiMinMaxTraceStep(originAgg *LogicalAggregation, aggs []*LogicalAggregation, joins []*LogicalJoin, opt *coreusage.LogicalOptimizeOp) { action := func() string { buffer := bytes.NewBufferString(fmt.Sprintf("%v_%v splited into [", originAgg.TP(), originAgg.ID())) for i, agg := range aggs { diff --git a/pkg/planner/core/rule_partition_processor.go b/pkg/planner/core/rule_partition_processor.go index 850a7efdc3e96..8a695ad733693 100644 --- a/pkg/planner/core/rule_partition_processor.go +++ b/pkg/planner/core/rule_partition_processor.go @@ -19,6 +19,7 @@ import ( "cmp" "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "math" "slices" "sort" @@ -29,7 +30,6 @@ import ( "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/pingcap/tidb/pkg/planner/util" 
"github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/table/tables" "github.com/pingcap/tidb/pkg/types" @@ -62,13 +62,13 @@ const FullRange = -1 // partitionProcessor is here because it's easier to prune partition after predicate push down. type partitionProcessor struct{} -func (s *partitionProcessor) optimize(_ context.Context, lp LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (s *partitionProcessor) optimize(_ context.Context, lp LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false p, err := s.rewriteDataSource(lp, opt) return p, planChanged, err } -func (s *partitionProcessor) rewriteDataSource(lp LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (s *partitionProcessor) rewriteDataSource(lp LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { // Assert there will not be sel -> sel in the ast. switch p := lp.(type) { case *DataSource: @@ -502,7 +502,7 @@ func (*partitionProcessor) reconstructTableColNames(ds *DataSource) ([]*types.Fi return names, nil } -func (s *partitionProcessor) processHashOrKeyPartition(ds *DataSource, pi *model.PartitionInfo, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (s *partitionProcessor) processHashOrKeyPartition(ds *DataSource, pi *model.PartitionInfo, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { names, err := s.reconstructTableColNames(ds) if err != nil { return nil, err @@ -824,7 +824,7 @@ func (s *partitionProcessor) pruneListPartition(ctx PlanContext, tbl table.Table return used, nil } -func (s *partitionProcessor) prune(ds *DataSource, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (s *partitionProcessor) prune(ds *DataSource, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { pi := ds.tableInfo.GetPartitionInfo() if pi == nil { return ds, nil @@ -1037,7 +1037,7 @@ func (s *partitionProcessor) pruneRangePartition(ctx PlanContext, pi *model.Part return result, nil } 
-func (s *partitionProcessor) processRangePartition(ds *DataSource, pi *model.PartitionInfo, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (s *partitionProcessor) processRangePartition(ds *DataSource, pi *model.PartitionInfo, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { used, err := s.pruneRangePartition(ds.SCtx(), pi, ds.table.(table.PartitionedTable), ds.allConds, ds.TblCols, ds.names) if err != nil { return nil, err @@ -1045,7 +1045,7 @@ func (s *partitionProcessor) processRangePartition(ds *DataSource, pi *model.Par return s.makeUnionAllChildren(ds, pi, used, opt) } -func (s *partitionProcessor) processListPartition(ds *DataSource, pi *model.PartitionInfo, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (s *partitionProcessor) processListPartition(ds *DataSource, pi *model.PartitionInfo, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { used, err := s.pruneListPartition(ds.SCtx(), ds.table, ds.partitionNames, ds.allConds, ds.TblCols) if err != nil { return nil, err @@ -1767,7 +1767,7 @@ func (*partitionProcessor) checkHintsApplicable(ds *DataSource, partitionSet set appendWarnForUnknownPartitions(ds.SCtx(), h.HintReadFromStorage, unknownPartitions) } -func (s *partitionProcessor) makeUnionAllChildren(ds *DataSource, pi *model.PartitionInfo, or partitionRangeOR, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (s *partitionProcessor) makeUnionAllChildren(ds *DataSource, pi *model.PartitionInfo, or partitionRangeOR, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { children := make([]LogicalPlan, 0, len(pi.Definitions)) partitionNameSet := make(set.StringSet) usedDefinition := make(map[int64]model.PartitionDefinition) @@ -2003,7 +2003,7 @@ func (p *rangeColumnsPruner) pruneUseBinarySearch(sctx PlanContext, op string, d return start, end } -func appendMakeUnionAllChildrenTranceStep(origin *DataSource, usedMap map[int64]model.PartitionDefinition, plan LogicalPlan, children []LogicalPlan, opt 
*util.LogicalOptimizeOp) { +func appendMakeUnionAllChildrenTranceStep(origin *DataSource, usedMap map[int64]model.PartitionDefinition, plan LogicalPlan, children []LogicalPlan, opt *coreusage.LogicalOptimizeOp) { if opt.TracerIsNil() { return } @@ -2059,7 +2059,7 @@ func appendMakeUnionAllChildrenTranceStep(origin *DataSource, usedMap map[int64] opt.AppendStepToCurrent(origin.ID(), origin.TP(), reason, action) } -func appendNoPartitionChildTraceStep(ds *DataSource, dual LogicalPlan, opt *util.LogicalOptimizeOp) { +func appendNoPartitionChildTraceStep(ds *DataSource, dual LogicalPlan, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v becomes %v_%v", ds.TP(), ds.ID(), dual.TP(), dual.ID()) } diff --git a/pkg/planner/core/rule_predicate_push_down.go b/pkg/planner/core/rule_predicate_push_down.go index 9561108586257..9f4f73753599e 100644 --- a/pkg/planner/core/rule_predicate_push_down.go +++ b/pkg/planner/core/rule_predicate_push_down.go @@ -18,6 +18,7 @@ import ( "bytes" "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" @@ -41,13 +42,13 @@ type exprPrefixAdder struct { lengths []int } -func (*ppdSolver) optimize(_ context.Context, lp LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (*ppdSolver) optimize(_ context.Context, lp LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false _, p := lp.PredicatePushDown(nil, opt) return p, planChanged, nil } -func addSelection(p LogicalPlan, child LogicalPlan, conditions []expression.Expression, chIdx int, opt *util.LogicalOptimizeOp) { +func addSelection(p LogicalPlan, child LogicalPlan, conditions []expression.Expression, chIdx int, opt *coreusage.LogicalOptimizeOp) { if len(conditions) == 0 { p.Children()[chIdx] = child return @@ -73,7 +74,7 @@ func addSelection(p LogicalPlan, child LogicalPlan, conditions []expression.Expr } // 
PredicatePushDown implements LogicalPlan interface. -func (p *baseLogicalPlan) PredicatePushDown(predicates []expression.Expression, opt *util.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { +func (p *baseLogicalPlan) PredicatePushDown(predicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { if len(p.children) == 0 { return predicates, p.self } @@ -97,7 +98,7 @@ func splitSetGetVarFunc(filters []expression.Expression) ([]expression.Expressio } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. -func (p *LogicalSelection) PredicatePushDown(predicates []expression.Expression, opt *util.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { +func (p *LogicalSelection) PredicatePushDown(predicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { predicates = DeleteTrueExprs(p, predicates) p.Conditions = DeleteTrueExprs(p, p.Conditions) var child LogicalPlan @@ -123,7 +124,7 @@ func (p *LogicalSelection) PredicatePushDown(predicates []expression.Expression, } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. -func (p *LogicalUnionScan) PredicatePushDown(predicates []expression.Expression, opt *util.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { +func (p *LogicalUnionScan) PredicatePushDown(predicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { retainedPredicates, _ := p.children[0].PredicatePushDown(predicates, opt) p.conditions = make([]expression.Expression, 0, len(predicates)) p.conditions = append(p.conditions, predicates...) @@ -132,7 +133,7 @@ func (p *LogicalUnionScan) PredicatePushDown(predicates []expression.Expression, } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. 
-func (ds *DataSource) PredicatePushDown(predicates []expression.Expression, opt *util.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { +func (ds *DataSource) PredicatePushDown(predicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { predicates = expression.PropagateConstant(ds.SCtx().GetExprCtx(), predicates) predicates = DeleteTrueExprs(ds, predicates) // Add tidb_shard() prefix to the condtion for shard index in some scenarios @@ -145,12 +146,12 @@ func (ds *DataSource) PredicatePushDown(predicates []expression.Expression, opt } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. -func (p *LogicalTableDual) PredicatePushDown(predicates []expression.Expression, _ *util.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { +func (p *LogicalTableDual) PredicatePushDown(predicates []expression.Expression, _ *coreusage.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { return predicates, p } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. -func (p *LogicalJoin) PredicatePushDown(predicates []expression.Expression, opt *util.LogicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) { +func (p *LogicalJoin) PredicatePushDown(predicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) { simplifyOuterJoin(p, predicates) var equalCond []*expression.ScalarFunction var leftPushCond, rightPushCond, otherCond, leftCond, rightCond []expression.Expression @@ -493,7 +494,7 @@ func specialNullRejectedCase1(ctx PlanContext, schema *expression.Schema, expr e } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. 
-func (p *LogicalExpand) PredicatePushDown(predicates []expression.Expression, opt *util.LogicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) { +func (p *LogicalExpand) PredicatePushDown(predicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) { // Note that, grouping column related predicates can't be pushed down, since grouping column has nullability change after Expand OP itself. // condition related with grouping column shouldn't be pushed down through it. // currently, since expand is adjacent to aggregate, any filter above aggregate wanted to be push down through expand only have two cases: @@ -505,7 +506,7 @@ func (p *LogicalExpand) PredicatePushDown(predicates []expression.Expression, op } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. -func (p *LogicalProjection) PredicatePushDown(predicates []expression.Expression, opt *util.LogicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) { +func (p *LogicalProjection) PredicatePushDown(predicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) { canBePushed := make([]expression.Expression, 0, len(predicates)) canNotBePushed := make([]expression.Expression, 0, len(predicates)) for _, expr := range p.Exprs { @@ -528,7 +529,7 @@ func (p *LogicalProjection) PredicatePushDown(predicates []expression.Expression } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. 
-func (p *LogicalUnionAll) PredicatePushDown(predicates []expression.Expression, opt *util.LogicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) { +func (p *LogicalUnionAll) PredicatePushDown(predicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) { for i, proj := range p.children { newExprs := make([]expression.Expression, 0, len(predicates)) newExprs = append(newExprs, predicates...) @@ -629,7 +630,7 @@ func (la *LogicalAggregation) pushDownDNFPredicatesForAggregation(cond expressio } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. -func (la *LogicalAggregation) PredicatePushDown(predicates []expression.Expression, opt *util.LogicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) { +func (la *LogicalAggregation) PredicatePushDown(predicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) { var condsToPush []expression.Expression exprsOriginal := make([]expression.Expression, 0, len(la.AggFuncs)) for _, fun := range la.AggFuncs { @@ -651,14 +652,14 @@ func (la *LogicalAggregation) PredicatePushDown(predicates []expression.Expressi } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. -func (p *LogicalLimit) PredicatePushDown(predicates []expression.Expression, opt *util.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { +func (p *LogicalLimit) PredicatePushDown(predicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { // Limit forbids any condition to push down. p.baseLogicalPlan.PredicatePushDown(nil, opt) return predicates, p } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. 
-func (p *LogicalMaxOneRow) PredicatePushDown(predicates []expression.Expression, opt *util.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { +func (p *LogicalMaxOneRow) PredicatePushDown(predicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { // MaxOneRow forbids any condition to push down. p.baseLogicalPlan.PredicatePushDown(nil, opt) return predicates, p @@ -809,7 +810,7 @@ func (p *LogicalWindow) GetPartitionByCols() []*expression.Column { } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. -func (p *LogicalWindow) PredicatePushDown(predicates []expression.Expression, opt *util.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { +func (p *LogicalWindow) PredicatePushDown(predicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { canBePushed := make([]expression.Expression, 0, len(predicates)) canNotBePushed := make([]expression.Expression, 0, len(predicates)) partitionCols := expression.NewSchema(p.GetPartitionByCols()...) @@ -827,7 +828,7 @@ func (p *LogicalWindow) PredicatePushDown(predicates []expression.Expression, op } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. 
-func (p *LogicalMemTable) PredicatePushDown(predicates []expression.Expression, _ *util.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { +func (p *LogicalMemTable) PredicatePushDown(predicates []expression.Expression, _ *coreusage.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { if p.Extractor != nil { predicates = p.Extractor.Extract(p.SCtx(), p.schema, p.names, predicates) } @@ -838,7 +839,7 @@ func (*ppdSolver) name() string { return "predicate_push_down" } -func appendTableDualTraceStep(replaced LogicalPlan, dual LogicalPlan, conditions []expression.Expression, opt *util.LogicalOptimizeOp) { +func appendTableDualTraceStep(replaced LogicalPlan, dual LogicalPlan, conditions []expression.Expression, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v is replaced by %v_%v", replaced.TP(), replaced.ID(), dual.TP(), dual.ID()) } @@ -856,7 +857,7 @@ func appendTableDualTraceStep(replaced LogicalPlan, dual LogicalPlan, conditions opt.AppendStepToCurrent(dual.ID(), dual.TP(), reason, action) } -func appendSelectionPredicatePushDownTraceStep(p *LogicalSelection, conditions []expression.Expression, opt *util.LogicalOptimizeOp) { +func appendSelectionPredicatePushDownTraceStep(p *LogicalSelection, conditions []expression.Expression, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v is removed", p.TP(), p.ID()) } @@ -879,7 +880,7 @@ func appendSelectionPredicatePushDownTraceStep(p *LogicalSelection, conditions [ opt.AppendStepToCurrent(p.ID(), p.TP(), reason, action) } -func appendDataSourcePredicatePushDownTraceStep(ds *DataSource, opt *util.LogicalOptimizeOp) { +func appendDataSourcePredicatePushDownTraceStep(ds *DataSource, opt *coreusage.LogicalOptimizeOp) { if len(ds.pushedDownConds) < 1 { return } @@ -900,7 +901,7 @@ func appendDataSourcePredicatePushDownTraceStep(ds *DataSource, opt *util.Logica opt.AppendStepToCurrent(ds.ID(), ds.TP(), reason, action) } -func 
appendAddSelectionTraceStep(p LogicalPlan, child LogicalPlan, sel *LogicalSelection, opt *util.LogicalOptimizeOp) { +func appendAddSelectionTraceStep(p LogicalPlan, child LogicalPlan, sel *LogicalSelection, opt *coreusage.LogicalOptimizeOp) { reason := func() string { return "" } @@ -1031,7 +1032,7 @@ func (adder *exprPrefixAdder) addExprPrefix4DNFCond(condition *expression.Scalar } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. -func (p *LogicalCTE) PredicatePushDown(predicates []expression.Expression, _ *util.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { +func (p *LogicalCTE) PredicatePushDown(predicates []expression.Expression, _ *coreusage.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { if p.cte.recursivePartLogicalPlan != nil { // Doesn't support recursive CTE yet. return predicates, p.self @@ -1067,7 +1068,7 @@ func (p *LogicalCTE) PredicatePushDown(predicates []expression.Expression, _ *ut // PredicatePushDown implements the LogicalPlan interface. // Currently, we only maintain the main query tree. 
-func (p *LogicalSequence) PredicatePushDown(predicates []expression.Expression, op *util.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { +func (p *LogicalSequence) PredicatePushDown(predicates []expression.Expression, op *coreusage.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { lastIdx := len(p.children) - 1 remained, newLastChild := p.children[lastIdx].PredicatePushDown(predicates, op) p.SetChild(lastIdx, newLastChild) diff --git a/pkg/planner/core/rule_predicate_simplification.go b/pkg/planner/core/rule_predicate_simplification.go index dabc4a85b55de..87b5c7172a51c 100644 --- a/pkg/planner/core/rule_predicate_simplification.go +++ b/pkg/planner/core/rule_predicate_simplification.go @@ -17,11 +17,11 @@ package core import ( "context" "errors" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "slices" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/planner/util" ) // predicateSimplification consolidates different predcicates on a column and its equivalence classes. 
Initial out is for @@ -65,12 +65,12 @@ func findPredicateType(expr expression.Expression) (*expression.Column, predicat return nil, otherPredicate } -func (*predicateSimplification) optimize(_ context.Context, p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (*predicateSimplification) optimize(_ context.Context, p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false return p.predicateSimplification(opt), planChanged, nil } -func (s *baseLogicalPlan) predicateSimplification(opt *util.LogicalOptimizeOp) LogicalPlan { +func (s *baseLogicalPlan) predicateSimplification(opt *coreusage.LogicalOptimizeOp) LogicalPlan { p := s.self for i, child := range p.Children() { newChild := child.predicateSimplification(opt) @@ -155,7 +155,7 @@ func applyPredicateSimplification(sctx PlanContext, predicates []expression.Expr return newValues } -func (ds *DataSource) predicateSimplification(*util.LogicalOptimizeOp) LogicalPlan { +func (ds *DataSource) predicateSimplification(*coreusage.LogicalOptimizeOp) LogicalPlan { p := ds.self.(*DataSource) p.pushedDownConds = applyPredicateSimplification(p.SCtx(), p.pushedDownConds) p.allConds = applyPredicateSimplification(p.SCtx(), p.allConds) diff --git a/pkg/planner/core/rule_push_down_sequence.go b/pkg/planner/core/rule_push_down_sequence.go index 17964353c5b52..fc248c3744b51 100644 --- a/pkg/planner/core/rule_push_down_sequence.go +++ b/pkg/planner/core/rule_push_down_sequence.go @@ -17,7 +17,7 @@ package core import ( "context" - "github.com/pingcap/tidb/pkg/planner/util" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" ) type pushDownSequenceSolver struct { @@ -27,7 +27,7 @@ func (*pushDownSequenceSolver) name() string { return "push_down_sequence" } -func (pdss *pushDownSequenceSolver) optimize(_ context.Context, lp LogicalPlan, _ *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (pdss *pushDownSequenceSolver) optimize(_ context.Context, lp 
LogicalPlan, _ *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false return pdss.recursiveOptimize(nil, lp), planChanged, nil } diff --git a/pkg/planner/core/rule_resolve_grouping_expand.go b/pkg/planner/core/rule_resolve_grouping_expand.go index 53e5892aaea05..563d7fd7650b1 100644 --- a/pkg/planner/core/rule_resolve_grouping_expand.go +++ b/pkg/planner/core/rule_resolve_grouping_expand.go @@ -16,8 +16,7 @@ package core import ( "context" - - "github.com/pingcap/tidb/pkg/planner/util" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" ) // For normal rollup Expand construction, its logical Expand should be bound @@ -74,7 +73,7 @@ type resolveExpand struct { // (upper required) (grouping sets columns appended) // // Expand operator itself is kind like a projection, while difference is that it has a multi projection list, named as leveled projection. -func (*resolveExpand) optimize(_ context.Context, p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (*resolveExpand) optimize(_ context.Context, p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false // As you see, Expand's leveled projection should be built after all column-prune is done. So we just make generating-leveled-projection // as the last rule of logical optimization, which is more clear. 
(spark has column prune action before building expand) @@ -86,7 +85,7 @@ func (*resolveExpand) name() string { return "resolve_expand" } -func genExpand(p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func genExpand(p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { for i, child := range p.Children() { np, err := genExpand(child, opt) if err != nil { diff --git a/pkg/planner/core/rule_result_reorder.go b/pkg/planner/core/rule_result_reorder.go index b503a6ede7326..42764f028cb86 100644 --- a/pkg/planner/core/rule_result_reorder.go +++ b/pkg/planner/core/rule_result_reorder.go @@ -16,6 +16,7 @@ package core import ( "context" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/planner/util" @@ -39,7 +40,7 @@ This rule reorders results by modifying or injecting a Sort operator: type resultReorder struct { } -func (rs *resultReorder) optimize(_ context.Context, lp LogicalPlan, _ *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (rs *resultReorder) optimize(_ context.Context, lp LogicalPlan, _ *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false ordered := rs.completeSort(lp) if !ordered { diff --git a/pkg/planner/core/rule_semi_join_rewrite.go b/pkg/planner/core/rule_semi_join_rewrite.go index 0c78e6ec6e921..fc9222b9784b1 100644 --- a/pkg/planner/core/rule_semi_join_rewrite.go +++ b/pkg/planner/core/rule_semi_join_rewrite.go @@ -16,11 +16,11 @@ package core import ( "context" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/planner/util" h "github.com/pingcap/tidb/pkg/util/hint" ) @@ -37,7 +37,7 @@ import ( type semiJoinRewriter struct { } -func (smj *semiJoinRewriter) optimize(_ context.Context, p LogicalPlan, _ *util.LogicalOptimizeOp) (LogicalPlan, 
bool, error) { +func (smj *semiJoinRewriter) optimize(_ context.Context, p LogicalPlan, _ *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false newLogicalPlan, err := smj.recursivePlan(p) return newLogicalPlan, planChanged, err diff --git a/pkg/planner/core/rule_topn_push_down.go b/pkg/planner/core/rule_topn_push_down.go index 830995f55387d..d1802374b7f4f 100644 --- a/pkg/planner/core/rule_topn_push_down.go +++ b/pkg/planner/core/rule_topn_push_down.go @@ -18,6 +18,7 @@ import ( "bytes" "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/planner/util" @@ -27,12 +28,12 @@ import ( type pushDownTopNOptimizer struct { } -func (*pushDownTopNOptimizer) optimize(_ context.Context, p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (*pushDownTopNOptimizer) optimize(_ context.Context, p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false return p.pushDownTopN(nil, opt), planChanged, nil } -func (s *baseLogicalPlan) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimizeOp) LogicalPlan { +func (s *baseLogicalPlan) pushDownTopN(topN *LogicalTopN, opt *coreusage.LogicalOptimizeOp) LogicalPlan { p := s.self for i, child := range p.Children() { p.Children()[i] = child.pushDownTopN(nil, opt) @@ -43,7 +44,7 @@ func (s *baseLogicalPlan) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptim return p } -func (p *LogicalCTE) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimizeOp) LogicalPlan { +func (p *LogicalCTE) pushDownTopN(topN *LogicalTopN, opt *coreusage.LogicalOptimizeOp) LogicalPlan { if topN != nil { return topN.setChild(p, opt) } @@ -51,7 +52,7 @@ func (p *LogicalCTE) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimizeOp } // setChild set p as topn's child. 
-func (lt *LogicalTopN) setChild(p LogicalPlan, opt *util.LogicalOptimizeOp) LogicalPlan { +func (lt *LogicalTopN) setChild(p LogicalPlan, opt *coreusage.LogicalOptimizeOp) LogicalPlan { // Remove this TopN if its child is a TableDual. dual, isDual := p.(*LogicalTableDual) if isDual { @@ -81,7 +82,7 @@ func (lt *LogicalTopN) setChild(p LogicalPlan, opt *util.LogicalOptimizeOp) Logi return lt } -func (ls *LogicalSort) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimizeOp) LogicalPlan { +func (ls *LogicalSort) pushDownTopN(topN *LogicalTopN, opt *coreusage.LogicalOptimizeOp) LogicalPlan { if topN == nil { return ls.baseLogicalPlan.pushDownTopN(nil, opt) } else if topN.isLimit() { @@ -93,13 +94,13 @@ func (ls *LogicalSort) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimize return ls.children[0].pushDownTopN(topN, opt) } -func (p *LogicalLimit) convertToTopN(opt *util.LogicalOptimizeOp) *LogicalTopN { +func (p *LogicalLimit) convertToTopN(opt *coreusage.LogicalOptimizeOp) *LogicalTopN { topn := LogicalTopN{Offset: p.Offset, Count: p.Count, PreferLimitToCop: p.PreferLimitToCop}.Init(p.SCtx(), p.QueryBlockOffset()) appendConvertTopNTraceStep(p, topn, opt) return topn } -func (p *LogicalLimit) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimizeOp) LogicalPlan { +func (p *LogicalLimit) pushDownTopN(topN *LogicalTopN, opt *coreusage.LogicalOptimizeOp) LogicalPlan { child := p.children[0].pushDownTopN(p.convertToTopN(opt), opt) if topN != nil { return topN.setChild(child, opt) @@ -107,7 +108,7 @@ func (p *LogicalLimit) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimize return child } -func (p *LogicalUnionAll) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimizeOp) LogicalPlan { +func (p *LogicalUnionAll) pushDownTopN(topN *LogicalTopN, opt *coreusage.LogicalOptimizeOp) LogicalPlan { for i, child := range p.children { var newTopN *LogicalTopN if topN != nil { @@ -126,7 +127,7 @@ func (p *LogicalUnionAll) pushDownTopN(topN *LogicalTopN, opt 
*util.LogicalOptim return p } -func (p *LogicalProjection) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimizeOp) LogicalPlan { +func (p *LogicalProjection) pushDownTopN(topN *LogicalTopN, opt *coreusage.LogicalOptimizeOp) LogicalPlan { for _, expr := range p.Exprs { if expression.HasAssignSetVarFunc(expr) { return p.baseLogicalPlan.pushDownTopN(topN, opt) @@ -164,7 +165,7 @@ func (p *LogicalProjection) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOpt return p } -func (p *LogicalLock) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimizeOp) LogicalPlan { +func (p *LogicalLock) pushDownTopN(topN *LogicalTopN, opt *coreusage.LogicalOptimizeOp) LogicalPlan { if topN != nil { p.children[0] = p.children[0].pushDownTopN(topN, opt) } @@ -172,7 +173,7 @@ func (p *LogicalLock) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimizeO } // pushDownTopNToChild will push a topN to one child of join. The idx stands for join child index. 0 is for left child. -func (p *LogicalJoin) pushDownTopNToChild(topN *LogicalTopN, idx int, opt *util.LogicalOptimizeOp) LogicalPlan { +func (p *LogicalJoin) pushDownTopNToChild(topN *LogicalTopN, idx int, opt *coreusage.LogicalOptimizeOp) LogicalPlan { if topN == nil { return p.children[idx].pushDownTopN(nil, opt) } @@ -198,7 +199,7 @@ func (p *LogicalJoin) pushDownTopNToChild(topN *LogicalTopN, idx int, opt *util. 
return p.children[idx].pushDownTopN(newTopN, opt) } -func (p *LogicalJoin) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimizeOp) LogicalPlan { +func (p *LogicalJoin) pushDownTopN(topN *LogicalTopN, opt *coreusage.LogicalOptimizeOp) LogicalPlan { switch p.JoinType { case LeftOuterJoin, LeftOuterSemiJoin, AntiLeftOuterSemiJoin: p.children[0] = p.pushDownTopNToChild(topN, 0, opt) @@ -221,7 +222,7 @@ func (*pushDownTopNOptimizer) name() string { return "topn_push_down" } -func appendTopNPushDownTraceStep(parent LogicalPlan, child LogicalPlan, opt *util.LogicalOptimizeOp) { +func appendTopNPushDownTraceStep(parent LogicalPlan, child LogicalPlan, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v is added as %v_%v's parent", parent.TP(), parent.ID(), child.TP(), child.ID()) } @@ -231,7 +232,7 @@ func appendTopNPushDownTraceStep(parent LogicalPlan, child LogicalPlan, opt *uti opt.AppendStepToCurrent(parent.ID(), parent.TP(), reason, action) } -func appendTopNPushDownJoinTraceStep(p *LogicalJoin, topN *LogicalTopN, idx int, opt *util.LogicalOptimizeOp) { +func appendTopNPushDownJoinTraceStep(p *LogicalJoin, topN *LogicalTopN, idx int, opt *coreusage.LogicalOptimizeOp) { action := func() string { buffer := bytes.NewBufferString(fmt.Sprintf("%v_%v is added and pushed into %v_%v's ", topN.TP(), topN.ID(), p.TP(), p.ID())) @@ -263,7 +264,7 @@ func appendTopNPushDownJoinTraceStep(p *LogicalJoin, topN *LogicalTopN, idx int, opt.AppendStepToCurrent(p.ID(), p.TP(), reason, action) } -func appendSortPassByItemsTraceStep(sort *LogicalSort, topN *LogicalTopN, opt *util.LogicalOptimizeOp) { +func appendSortPassByItemsTraceStep(sort *LogicalSort, topN *LogicalTopN, opt *coreusage.LogicalOptimizeOp) { action := func() string { buffer := bytes.NewBufferString(fmt.Sprintf("%v_%v passes ByItems[", sort.TP(), sort.ID())) for i, item := range sort.ByItems { @@ -281,7 +282,7 @@ func appendSortPassByItemsTraceStep(sort *LogicalSort, topN 
*LogicalTopN, opt *u opt.AppendStepToCurrent(sort.ID(), sort.TP(), reason, action) } -func appendNewTopNTraceStep(topN *LogicalTopN, union *LogicalUnionAll, opt *util.LogicalOptimizeOp) { +func appendNewTopNTraceStep(topN *LogicalTopN, union *LogicalUnionAll, opt *coreusage.LogicalOptimizeOp) { reason := func() string { return "" } @@ -291,7 +292,7 @@ func appendNewTopNTraceStep(topN *LogicalTopN, union *LogicalUnionAll, opt *util opt.AppendStepToCurrent(topN.ID(), topN.TP(), reason, action) } -func appendConvertTopNTraceStep(p LogicalPlan, topN *LogicalTopN, opt *util.LogicalOptimizeOp) { +func appendConvertTopNTraceStep(p LogicalPlan, topN *LogicalTopN, opt *coreusage.LogicalOptimizeOp) { reason := func() string { return "" } diff --git a/pkg/planner/core/task.go b/pkg/planner/core/task.go index ceb35ed3e4b17..af80eebd545b4 100644 --- a/pkg/planner/core/task.go +++ b/pkg/planner/core/task.go @@ -41,22 +41,8 @@ import ( "go.uber.org/zap" ) -var ( - _ task = &copTask{} - _ task = &rootTask{} - _ task = &mppTask{} -) - -// task is a new version of `PhysicalPlanInfo`. It stores cost information for a task. -// A task may be CopTask, RootTask, MPPTaskMeta or a ParallelTask. -type task interface { - count() float64 - copy() task - plan() PhysicalPlan - invalid() bool - convertToRootTask(ctx PlanContext) *rootTask - MemoryUsage() int64 -} +var _ Task = &copTask{} +var _ Task = &mppTask{} // copTask is a task that runs in a distributed kv store. // TODO: In future, we should split copTask to indexTask and tableTask. 
@@ -98,36 +84,32 @@ type copTask struct { expectCnt uint64 } -func (t *copTask) invalid() bool { +func (t *copTask) Invalid() bool { return t.tablePlan == nil && t.indexPlan == nil && len(t.idxMergePartPlans) == 0 } -func (t *rootTask) invalid() bool { - return t.p == nil -} - -func (t *copTask) count() float64 { +func (t *copTask) Count() float64 { if t.indexPlanFinished { return t.tablePlan.StatsInfo().RowCount } return t.indexPlan.StatsInfo().RowCount } -func (t *copTask) copy() task { +func (t *copTask) Copy() Task { nt := *t return &nt } // copTask plan should be careful with indexMergeReader, whose real plan is stored in // idxMergePartPlans, when its indexPlanFinished is marked with false. -func (t *copTask) plan() PhysicalPlan { +func (t *copTask) Plan() PhysicalPlan { if t.indexPlanFinished { return t.tablePlan } return t.indexPlan } -func attachPlan2Task(p PhysicalPlan, t task) task { +func attachPlan2Task(p PhysicalPlan, t Task) Task { switch v := t.(type) { case *copTask: if v.indexPlanFinished { @@ -137,9 +119,9 @@ func attachPlan2Task(p PhysicalPlan, t task) task { p.SetChildren(v.indexPlan) v.indexPlan = p } - case *rootTask: - p.SetChildren(v.p) - v.p = p + case *RootTask: + p.SetChildren(v.GetPlan()) + v.SetPlan(p) case *mppTask: p.SetChildren(v.p) v.p = p @@ -219,89 +201,85 @@ func (t *copTask) MemoryUsage() (sum int64) { return } -func (p *basePhysicalPlan) attach2Task(tasks ...task) task { - t := tasks[0].convertToRootTask(p.SCtx()) +func (p *basePhysicalPlan) Attach2Task(tasks ...Task) Task { + t := tasks[0].ConvertToRootTask(p.SCtx()) return attachPlan2Task(p.self, t) } -func (p *PhysicalUnionScan) attach2Task(tasks ...task) task { +func (p *PhysicalUnionScan) Attach2Task(tasks ...Task) Task { // We need to pull the projection under unionScan upon unionScan. // Since the projection only prunes columns, it's ok the put it upon unionScan. 
- if sel, ok := tasks[0].plan().(*PhysicalSelection); ok { + if sel, ok := tasks[0].Plan().(*PhysicalSelection); ok { if pj, ok := sel.children[0].(*PhysicalProjection); ok { // Convert unionScan->selection->projection to projection->unionScan->selection. sel.SetChildren(pj.children...) p.SetChildren(sel) - p.SetStats(tasks[0].plan().StatsInfo()) - rt, _ := tasks[0].(*rootTask) - rt.p = p + p.SetStats(tasks[0].Plan().StatsInfo()) + rt, _ := tasks[0].(*RootTask) + rt.SetPlan(p) pj.SetChildren(p) return pj.attach2Task(tasks...) } } - if pj, ok := tasks[0].plan().(*PhysicalProjection); ok { + if pj, ok := tasks[0].Plan().(*PhysicalProjection); ok { // Convert unionScan->projection to projection->unionScan, because unionScan can't handle projection as its children. p.SetChildren(pj.children...) - p.SetStats(tasks[0].plan().StatsInfo()) - rt, _ := tasks[0].(*rootTask) - rt.p = pj.children[0] + p.SetStats(tasks[0].Plan().StatsInfo()) + rt, _ := tasks[0].(*RootTask) + rt.SetPlan(pj.children[0]) pj.SetChildren(p) - return pj.attach2Task(p.basePhysicalPlan.attach2Task(tasks...)) + return pj.attach2Task(p.basePhysicalPlan.Attach2Task(tasks...)) } - p.SetStats(tasks[0].plan().StatsInfo()) - return p.basePhysicalPlan.attach2Task(tasks...) + p.SetStats(tasks[0].Plan().StatsInfo()) + return p.basePhysicalPlan.Attach2Task(tasks...) 
} -func (p *PhysicalApply) attach2Task(tasks ...task) task { - lTask := tasks[0].convertToRootTask(p.SCtx()) - rTask := tasks[1].convertToRootTask(p.SCtx()) - p.SetChildren(lTask.plan(), rTask.plan()) +func (p *PhysicalApply) Attach2Task(tasks ...Task) Task { + lTask := tasks[0].ConvertToRootTask(p.SCtx()) + rTask := tasks[1].ConvertToRootTask(p.SCtx()) + p.SetChildren(lTask.Plan(), rTask.Plan()) p.schema = BuildPhysicalJoinSchema(p.JoinType, p) - t := &rootTask{ - p: p, - } + t := &RootTask{} + t.SetPlan(p) return t } -func (p *PhysicalIndexMergeJoin) attach2Task(tasks ...task) task { +func (p *PhysicalIndexMergeJoin) Attach2Task(tasks ...Task) Task { innerTask := p.innerTask - outerTask := tasks[1-p.InnerChildIdx].convertToRootTask(p.SCtx()) + outerTask := tasks[1-p.InnerChildIdx].ConvertToRootTask(p.SCtx()) if p.InnerChildIdx == 1 { - p.SetChildren(outerTask.plan(), innerTask.plan()) + p.SetChildren(outerTask.Plan(), innerTask.Plan()) } else { - p.SetChildren(innerTask.plan(), outerTask.plan()) - } - t := &rootTask{ - p: p, + p.SetChildren(innerTask.Plan(), outerTask.Plan()) } + t := &RootTask{} + t.SetPlan(p) return t } -func (p *PhysicalIndexHashJoin) attach2Task(tasks ...task) task { +func (p *PhysicalIndexHashJoin) attach2Task(tasks ...Task) Task { innerTask := p.innerTask - outerTask := tasks[1-p.InnerChildIdx].convertToRootTask(p.SCtx()) + outerTask := tasks[1-p.InnerChildIdx].ConvertToRootTask(p.SCtx()) if p.InnerChildIdx == 1 { - p.SetChildren(outerTask.plan(), innerTask.plan()) + p.SetChildren(outerTask.Plan(), innerTask.Plan()) } else { - p.SetChildren(innerTask.plan(), outerTask.plan()) - } - t := &rootTask{ - p: p, + p.SetChildren(innerTask.Plan(), outerTask.Plan()) } + t := &RootTask{} + t.SetPlan(p) return t } -func (p *PhysicalIndexJoin) attach2Task(tasks ...task) task { +func (p *PhysicalIndexJoin) Attach2Task(tasks ...Task) Task { innerTask := p.innerTask - outerTask := tasks[1-p.InnerChildIdx].convertToRootTask(p.SCtx()) + outerTask := 
tasks[1-p.InnerChildIdx].ConvertToRootTask(p.SCtx()) if p.InnerChildIdx == 1 { - p.SetChildren(outerTask.plan(), innerTask.plan()) + p.SetChildren(outerTask.Plan(), innerTask.Plan()) } else { - p.SetChildren(innerTask.plan(), outerTask.plan()) - } - t := &rootTask{ - p: p, + p.SetChildren(innerTask.Plan(), outerTask.Plan()) } + t := &RootTask{} + t.SetPlan(p) return t } @@ -318,16 +296,15 @@ func getAvgRowSize(stats *property.StatsInfo, cols []*expression.Column) (size f return } -func (p *PhysicalHashJoin) attach2Task(tasks ...task) task { +func (p *PhysicalHashJoin) attach2Task(tasks ...Task) Task { if p.storeTp == kv.TiFlash { return p.attach2TaskForTiFlash(tasks...) } - lTask := tasks[0].convertToRootTask(p.SCtx()) - rTask := tasks[1].convertToRootTask(p.SCtx()) - p.SetChildren(lTask.plan(), rTask.plan()) - task := &rootTask{ - p: p, - } + lTask := tasks[0].ConvertToRootTask(p.SCtx()) + rTask := tasks[1].ConvertToRootTask(p.SCtx()) + p.SetChildren(lTask.Plan(), rTask.Plan()) + task := &RootTask{} + task.SetPlan(p) return task } @@ -482,7 +459,7 @@ func (p *PhysicalHashJoin) convertPartitionKeysIfNeed(lTask, rTask *mppTask) (*m } // if left or right child changes, we need to add enforcer. 
if lChanged { - nlTask := lTask.copy().(*mppTask) + nlTask := lTask.Copy().(*mppTask) nlTask.p = lProj nlTask = nlTask.enforceExchanger(&property.PhysicalProperty{ TaskTp: property.MppTaskType, @@ -492,7 +469,7 @@ func (p *PhysicalHashJoin) convertPartitionKeysIfNeed(lTask, rTask *mppTask) (*m lTask = nlTask } if rChanged { - nrTask := rTask.copy().(*mppTask) + nrTask := rTask.Copy().(*mppTask) nrTask.p = rProj nrTask = nrTask.enforceExchanger(&property.PhysicalProperty{ TaskTp: property.MppTaskType, @@ -504,7 +481,7 @@ func (p *PhysicalHashJoin) convertPartitionKeysIfNeed(lTask, rTask *mppTask) (*m return lTask, rTask } -func (p *PhysicalHashJoin) attach2TaskForMpp(tasks ...task) task { +func (p *PhysicalHashJoin) attach2TaskForMpp(tasks ...Task) Task { lTask, lok := tasks[0].(*mppTask) rTask, rok := tasks[1].(*mppTask) if !lok || !rok { @@ -517,7 +494,7 @@ func (p *PhysicalHashJoin) attach2TaskForMpp(tasks ...task) task { } lTask, rTask = p.convertPartitionKeysIfNeed(lTask, rTask) } - p.SetChildren(lTask.plan(), rTask.plan()) + p.SetChildren(lTask.Plan(), rTask.Plan()) p.schema = BuildPhysicalJoinSchema(p.JoinType, p) // outer task is the task that will pass its MPPPartitionType to the join result @@ -547,13 +524,13 @@ func (p *PhysicalHashJoin) attach2TaskForMpp(tasks ...task) task { return task } -func (p *PhysicalHashJoin) attach2TaskForTiFlash(tasks ...task) task { +func (p *PhysicalHashJoin) attach2TaskForTiFlash(tasks ...Task) Task { lTask, lok := tasks[0].(*copTask) rTask, rok := tasks[1].(*copTask) if !lok || !rok { return p.attach2TaskForMpp(tasks...) 
} - p.SetChildren(lTask.plan(), rTask.plan()) + p.SetChildren(lTask.Plan(), rTask.Plan()) p.schema = BuildPhysicalJoinSchema(p.JoinType, p) if !lTask.indexPlanFinished { lTask.finishIndexPlan() @@ -570,18 +547,17 @@ func (p *PhysicalHashJoin) attach2TaskForTiFlash(tasks ...task) task { return task } -func (p *PhysicalMergeJoin) attach2Task(tasks ...task) task { - lTask := tasks[0].convertToRootTask(p.SCtx()) - rTask := tasks[1].convertToRootTask(p.SCtx()) - p.SetChildren(lTask.plan(), rTask.plan()) - t := &rootTask{ - p: p, - } +func (p *PhysicalMergeJoin) attach2Task(tasks ...Task) Task { + lTask := tasks[0].ConvertToRootTask(p.SCtx()) + rTask := tasks[1].ConvertToRootTask(p.SCtx()) + p.SetChildren(lTask.Plan(), rTask.Plan()) + t := &RootTask{} + t.SetPlan(p) return t } -func buildIndexLookUpTask(ctx PlanContext, t *copTask) *rootTask { - newTask := &rootTask{} +func buildIndexLookUpTask(ctx PlanContext, t *copTask) *RootTask { + newTask := &RootTask{} p := PhysicalIndexLookUpReader{ tablePlan: t.tablePlan, indexPlan: t.indexPlan, @@ -608,9 +584,9 @@ func buildIndexLookUpTask(ctx PlanContext, t *copTask) *rootTask { proj := PhysicalProjection{Exprs: expression.Column2Exprs(schema.Columns)}.Init(ctx, p.StatsInfo(), t.tablePlan.QueryBlockOffset(), nil) proj.SetSchema(schema) proj.SetChildren(p) - newTask.p = proj + newTask.SetPlan(proj) } else { - newTask.p = p + newTask.SetPlan(p) } return newTask } @@ -649,16 +625,12 @@ func calcPagingCost(ctx PlanContext, indexPlan PhysicalPlan, expectCnt uint64) f return math.Max(pagingCst-sessVars.GetSeekFactor(nil), 0) } -func (t *rootTask) convertToRootTask(_ PlanContext) *rootTask { - return t.copy().(*rootTask) -} - -func (t *copTask) convertToRootTask(ctx PlanContext) *rootTask { +func (t *copTask) ConvertToRootTask(ctx PlanContext) *RootTask { // copy one to avoid changing itself. 
- return t.copy().(*copTask).convertToRootTaskImpl(ctx) + return t.Copy().(*copTask).convertToRootTaskImpl(ctx) } -func (t *copTask) convertToRootTaskImpl(ctx PlanContext) *rootTask { +func (t *copTask) convertToRootTaskImpl(ctx PlanContext) *RootTask { // copTasks are run in parallel, to make the estimated cost closer to execution time, we amortize // the cost to cop iterator workers. According to `CopClient::Send`, the concurrency // is Min(DistSQLScanConcurrency, numRegionsInvolvedInScan), since we cannot infer @@ -685,7 +657,7 @@ func (t *copTask) convertToRootTaskImpl(ctx PlanContext) *rootTask { t.originSchema = prevSchema } } - newTask := &rootTask{} + newTask := &RootTask{} if t.idxMergePartPlans != nil { p := PhysicalIndexMergeReader{ partialPlans: t.idxMergePartPlans, @@ -696,14 +668,14 @@ func (t *copTask) convertToRootTaskImpl(ctx PlanContext) *rootTask { }.Init(ctx, t.idxMergePartPlans[0].QueryBlockOffset()) p.PlanPartInfo = t.physPlanPartInfo setTableScanToTableRowIDScan(p.tablePlan) - newTask.p = p + newTask.SetPlan(p) t.handleRootTaskConds(ctx, newTask) if t.needExtraProj { schema := t.originSchema proj := PhysicalProjection{Exprs: expression.Column2Exprs(schema.Columns)}.Init(ctx, p.StatsInfo(), t.idxMergePartPlans[0].QueryBlockOffset(), nil) proj.SetSchema(schema) proj.SetChildren(p) - newTask.p = proj + newTask.SetPlan(proj) } return newTask } @@ -713,7 +685,7 @@ func (t *copTask) convertToRootTaskImpl(ctx PlanContext) *rootTask { p := PhysicalIndexReader{indexPlan: t.indexPlan}.Init(ctx, t.indexPlan.QueryBlockOffset()) p.PlanPartInfo = t.physPlanPartInfo p.SetStats(t.indexPlan.StatsInfo()) - newTask.p = p + newTask.SetPlan(p) } else { tp := t.tablePlan for len(tp.Children()) > 0 { @@ -749,9 +721,9 @@ func (t *copTask) convertToRootTaskImpl(ctx PlanContext) *rootTask { proj := PhysicalProjection{Exprs: expression.Column2Exprs(t.originSchema.Columns)}.Init(ts.SCtx(), ts.StatsInfo(), ts.QueryBlockOffset(), nil) proj.SetSchema(t.originSchema) 
proj.SetChildren(p) - newTask.p = proj + newTask.SetPlan(proj) } else { - newTask.p = p + newTask.SetPlan(p) } } @@ -759,17 +731,17 @@ func (t *copTask) convertToRootTaskImpl(ctx PlanContext) *rootTask { return newTask } -func (t *copTask) handleRootTaskConds(ctx PlanContext, newTask *rootTask) { +func (t *copTask) handleRootTaskConds(ctx PlanContext, newTask *RootTask) { if len(t.rootTaskConds) > 0 { selectivity, _, err := cardinality.Selectivity(ctx, t.tblColHists, t.rootTaskConds, nil) if err != nil { logutil.BgLogger().Debug("calculate selectivity failed, use selection factor", zap.Error(err)) selectivity = SelectionFactor } - sel := PhysicalSelection{Conditions: t.rootTaskConds}.Init(ctx, newTask.p.StatsInfo().Scale(selectivity), newTask.p.QueryBlockOffset()) + sel := PhysicalSelection{Conditions: t.rootTaskConds}.Init(ctx, newTask.GetPlan().StatsInfo().Scale(selectivity), newTask.GetPlan().QueryBlockOffset()) sel.fromDataSource = true - sel.SetChildren(newTask.p) - newTask.p = sel + sel.SetChildren(newTask.GetPlan()) + newTask.SetPlan(sel) } } @@ -784,40 +756,6 @@ func setTableScanToTableRowIDScan(p PhysicalPlan) { } } -// rootTask is the final sink node of a plan graph. It should be a single goroutine on tidb. -type rootTask struct { - p PhysicalPlan - isEmpty bool // isEmpty indicates if this task contains a dual table and returns empty data. - // TODO: The flag 'isEmpty' is only checked by Projection and UnionAll. We should support more cases in the future. -} - -func (t *rootTask) copy() task { - return &rootTask{ - p: t.p, - } -} - -func (t *rootTask) count() float64 { - return t.p.StatsInfo().RowCount -} - -func (t *rootTask) plan() PhysicalPlan { - return t.p -} - -// MemoryUsage return the memory usage of rootTask -func (t *rootTask) MemoryUsage() (sum int64) { - if t == nil { - return - } - - sum = size.SizeOfInterface + size.SizeOfBool - if t.p != nil { - sum += t.p.MemoryUsage() - } - return sum -} - // attach2Task attach limit to different cases. 
// For Normal Index Lookup // 1: attach the limit to table side or index side of normal index lookup cop task. (normal case, old code, no more @@ -834,8 +772,8 @@ func (t *rootTask) MemoryUsage() (sum int64) { // // 4: attach the limit to the TOP of root index merge operator if there is some root condition exists for index merge // intersection/union case. -func (p *PhysicalLimit) attach2Task(tasks ...task) task { - t := tasks[0].copy() +func (p *PhysicalLimit) attach2Task(tasks ...Task) Task { + t := tasks[0].Copy() newPartitionBy := make([]property.SortItem, 0, len(p.GetPartitionBy())) for _, expr := range p.GetPartitionBy() { newPartitionBy = append(newPartitionBy, expr.Clone()) @@ -853,7 +791,7 @@ func (p *PhysicalLimit) attach2Task(tasks ...task) task { cop.tablePlan = pushedDownLimit // Don't use clone() so that Limit and its children share the same schema. Otherwise, the virtual generated column may not be resolved right. pushedDownLimit.SetSchema(pushedDownLimit.children[0].Schema()) - t = cop.convertToRootTask(p.SCtx()) + t = cop.ConvertToRootTask(p.SCtx()) } if len(cop.idxMergePartPlans) == 0 { // For double read which requires order being kept, the limit cannot be pushed down to the table side, @@ -861,7 +799,7 @@ func (p *PhysicalLimit) attach2Task(tasks ...task) task { if (!cop.keepOrder || !cop.indexPlanFinished || cop.indexPlan == nil) && len(cop.rootTaskConds) == 0 { // When limit is pushed down, we should remove its offset. newCount := p.Offset + p.Count - childProfile := cop.plan().StatsInfo() + childProfile := cop.Plan().StatsInfo() // Strictly speaking, for the row count of stats, we should multiply newCount with "regionNum", // but "regionNum" is unknown since the copTask can be a double read, so we ignore it now. stats := deriveLimitStats(childProfile, float64(newCount)) @@ -870,7 +808,7 @@ func (p *PhysicalLimit) attach2Task(tasks ...task) task { // Don't use clone() so that Limit and its children share the same schema. 
Otherwise the virtual generated column may not be resolved right. pushedDownLimit.SetSchema(pushedDownLimit.children[0].Schema()) } - t = cop.convertToRootTask(p.SCtx()) + t = cop.ConvertToRootTask(p.SCtx()) sunk = p.sinkIntoIndexLookUp(t) } else if !cop.idxMergeIsIntersection { // We only support push part of the order prop down to index merge build case. @@ -891,12 +829,12 @@ func (p *PhysicalLimit) attach2Task(tasks ...task) task { limitChildren = append(limitChildren, pushedDownLimit) } cop.idxMergePartPlans = limitChildren - t = cop.convertToRootTask(p.SCtx()) + t = cop.ConvertToRootTask(p.SCtx()) sunk = p.sinkIntoIndexMerge(t) } } else { // when there are some root conditions, just sink the limit upon the index merge reader. - t = cop.convertToRootTask(p.SCtx()) + t = cop.ConvertToRootTask(p.SCtx()) sunk = p.sinkIntoIndexMerge(t) } } else if cop.idxMergeIsIntersection { @@ -909,26 +847,26 @@ func (p *PhysicalLimit) attach2Task(tasks ...task) task { // indicates the table side is not a pure table-scan, so we could only append the limit upon the table plan. suspendLimitAboveTablePlan() } else { - t = cop.convertToRootTask(p.SCtx()) + t = cop.ConvertToRootTask(p.SCtx()) sunk = p.sinkIntoIndexMerge(t) } } else { // Otherwise, suspend the limit out of index merge reader. - t = cop.convertToRootTask(p.SCtx()) + t = cop.ConvertToRootTask(p.SCtx()) sunk = p.sinkIntoIndexMerge(t) } } else { // Whatever the remained case is, we directly convert to it to root task. 
- t = cop.convertToRootTask(p.SCtx()) + t = cop.ConvertToRootTask(p.SCtx()) } } else if mpp, ok := t.(*mppTask); ok { newCount := p.Offset + p.Count - childProfile := mpp.plan().StatsInfo() + childProfile := mpp.Plan().StatsInfo() stats := deriveLimitStats(childProfile, float64(newCount)) pushedDownLimit := PhysicalLimit{Count: newCount, PartitionBy: newPartitionBy}.Init(p.SCtx(), stats, p.QueryBlockOffset()) mpp = attachPlan2Task(pushedDownLimit, mpp).(*mppTask) pushedDownLimit.SetSchema(pushedDownLimit.children[0].Schema()) - t = mpp.convertToRootTask(p.SCtx()) + t = mpp.ConvertToRootTask(p.SCtx()) } if sunk { return t @@ -941,10 +879,10 @@ func (p *PhysicalLimit) attach2Task(tasks ...task) task { return attachPlan2Task(p, t) } -func (p *PhysicalLimit) sinkIntoIndexLookUp(t task) bool { - root := t.(*rootTask) - reader, isDoubleRead := root.p.(*PhysicalIndexLookUpReader) - proj, isProj := root.p.(*PhysicalProjection) +func (p *PhysicalLimit) sinkIntoIndexLookUp(t Task) bool { + root := t.(*RootTask) + reader, isDoubleRead := root.GetPlan().(*PhysicalIndexLookUpReader) + proj, isProj := root.GetPlan().(*PhysicalProjection) if !isDoubleRead && !isProj { return false } @@ -972,8 +910,8 @@ func (p *PhysicalLimit) sinkIntoIndexLookUp(t task) bool { }.Init(p.SCtx(), p.StatsInfo(), p.QueryBlockOffset(), nil) extraProj.SetSchema(p.schema) // If the root.p is already a Projection. We left the optimization for the later Projection Elimination. 
- extraProj.SetChildren(root.p) - root.p = extraProj + extraProj.SetChildren(root.GetPlan()) + root.SetPlan(extraProj) } reader.PushedLimit = &PushedDownLimit{ @@ -993,10 +931,10 @@ func (p *PhysicalLimit) sinkIntoIndexLookUp(t task) bool { return true } -func (p *PhysicalLimit) sinkIntoIndexMerge(t task) bool { - root := t.(*rootTask) - imReader, isIm := root.p.(*PhysicalIndexMergeReader) - proj, isProj := root.p.(*PhysicalProjection) +func (p *PhysicalLimit) sinkIntoIndexMerge(t Task) bool { + root := t.(*RootTask) + imReader, isIm := root.GetPlan().(*PhysicalIndexMergeReader) + proj, isProj := root.GetPlan().(*PhysicalProjection) if !isIm && !isProj { return false } @@ -1025,10 +963,10 @@ func (p *PhysicalLimit) sinkIntoIndexMerge(t task) bool { ts.StatsInfo().RowCount = originStats.RowCount } } - needProj := p.schema.Len() != root.p.Schema().Len() + needProj := p.schema.Len() != root.GetPlan().Schema().Len() if !needProj { for i := 0; i < p.schema.Len(); i++ { - if !p.schema.Columns[i].EqualColumn(root.p.Schema().Columns[i]) { + if !p.schema.Columns[i].EqualColumn(root.GetPlan().Schema().Columns[i]) { needProj = true break } @@ -1040,23 +978,23 @@ func (p *PhysicalLimit) sinkIntoIndexMerge(t task) bool { }.Init(p.SCtx(), p.StatsInfo(), p.QueryBlockOffset(), nil) extraProj.SetSchema(p.schema) // If the root.p is already a Projection. We left the optimization for the later Projection Elimination. 
- extraProj.SetChildren(root.p) - root.p = extraProj + extraProj.SetChildren(root.GetPlan()) + root.SetPlan(extraProj) } return true } -func (p *PhysicalSort) attach2Task(tasks ...task) task { - t := tasks[0].copy() +func (p *PhysicalSort) attach2Task(tasks ...Task) Task { + t := tasks[0].Copy() t = attachPlan2Task(p, t) return t } -func (p *NominalSort) attach2Task(tasks ...task) task { +func (p *NominalSort) attach2Task(tasks ...Task) Task { if p.OnlyColumn { return tasks[0] } - t := tasks[0].copy() + t := tasks[0].Copy() t = attachPlan2Task(p, t) return t } @@ -1149,7 +1087,7 @@ func (p *PhysicalTopN) canPushDownToTiKV(copTask *copTask) bool { return false } } - } else if p.containVirtualColumn(copTask.plan().Schema().Columns) { + } else if p.containVirtualColumn(copTask.Plan().Schema().Columns) { return false } return true @@ -1160,14 +1098,14 @@ func (p *PhysicalTopN) canPushDownToTiFlash(mppTask *mppTask) bool { if !p.canExpressionConvertedToPB(kv.TiFlash) { return false } - if p.containVirtualColumn(mppTask.plan().Schema().Columns) { + if p.containVirtualColumn(mppTask.Plan().Schema().Columns) { return false } return true } -func (p *PhysicalTopN) attach2Task(tasks ...task) task { - t := tasks[0].copy() +func (p *PhysicalTopN) attach2Task(tasks ...Task) Task { + t := tasks[0].Copy() cols := make([]*expression.Column, 0, len(p.ByItems)) for _, item := range p.ByItems { cols = append(cols, expression.ExtractColumns(item.Expr)...) @@ -1190,7 +1128,7 @@ func (p *PhysicalTopN) attach2Task(tasks ...task) task { pushedDownTopN := p.getPushedDownTopN(mppTask.p) mppTask.p = pushedDownTopN } - rootTask := t.convertToRootTask(p.SCtx()) + rootTask := t.ConvertToRootTask(p.SCtx()) // Skip TopN with partition on the root. This is a derived topN and window function // will take care of the filter. 
if len(p.GetPartitionBy()) > 0 { @@ -1199,8 +1137,8 @@ func (p *PhysicalTopN) attach2Task(tasks ...task) task { return attachPlan2Task(p, rootTask) } -func (p *PhysicalExpand) attach2Task(tasks ...task) task { - t := tasks[0].copy() +func (p *PhysicalExpand) attach2Task(tasks ...Task) Task { + t := tasks[0].Copy() // current expand can only be run in MPP TiFlash mode. if mpp, ok := t.(*mppTask); ok { p.SetChildren(mpp.p) @@ -1210,8 +1148,8 @@ func (p *PhysicalExpand) attach2Task(tasks ...task) task { return invalidTask } -func (p *PhysicalProjection) attach2Task(tasks ...task) task { - t := tasks[0].copy() +func (p *PhysicalProjection) attach2Task(tasks ...Task) Task { + t := tasks[0].Copy() if cop, ok := t.(*copTask); ok { if (len(cop.rootTaskConds) == 0 && len(cop.idxMergePartPlans) == 0) && expression.CanExprsPushDown(GetPushDownCtx(p.SCtx()), p.Exprs, cop.getStoreType()) { copTask := attachPlan2Task(p, cop) @@ -1224,21 +1162,21 @@ func (p *PhysicalProjection) attach2Task(tasks ...task) task { return mpp } } - t = t.convertToRootTask(p.SCtx()) + t = t.ConvertToRootTask(p.SCtx()) t = attachPlan2Task(p, t) - if root, ok := tasks[0].(*rootTask); ok && root.isEmpty { - t.(*rootTask).isEmpty = true + if root, ok := tasks[0].(*RootTask); ok && root.IsEmpty() { + t.(*RootTask).SetEmpty(true) } return t } -func (p *PhysicalUnionAll) attach2MppTasks(tasks ...task) task { +func (p *PhysicalUnionAll) attach2MppTasks(tasks ...Task) Task { t := &mppTask{p: p} childPlans := make([]PhysicalPlan, 0, len(tasks)) for _, tk := range tasks { - if mpp, ok := tk.(*mppTask); ok && !tk.invalid() { - childPlans = append(childPlans, mpp.plan()) - } else if root, ok := tk.(*rootTask); ok && root.isEmpty { + if mpp, ok := tk.(*mppTask); ok && !tk.Invalid() { + childPlans = append(childPlans, mpp.Plan()) + } else if root, ok := tk.(*RootTask); ok && root.IsEmpty() { continue } else { return invalidTask @@ -1251,7 +1189,7 @@ func (p *PhysicalUnionAll) attach2MppTasks(tasks ...task) task { 
return t } -func (p *PhysicalUnionAll) attach2Task(tasks ...task) task { +func (p *PhysicalUnionAll) attach2Task(tasks ...Task) Task { for _, t := range tasks { if _, ok := t.(*mppTask); ok { if p.TP() == plancodec.TypePartitionUnion { @@ -1263,23 +1201,24 @@ func (p *PhysicalUnionAll) attach2Task(tasks ...task) task { return p.attach2MppTasks(tasks...) } } - t := &rootTask{p: p} + t := &RootTask{} + t.SetPlan(p) childPlans := make([]PhysicalPlan, 0, len(tasks)) for _, task := range tasks { - task = task.convertToRootTask(p.SCtx()) - childPlans = append(childPlans, task.plan()) + task = task.ConvertToRootTask(p.SCtx()) + childPlans = append(childPlans, task.Plan()) } p.SetChildren(childPlans...) return t } -func (sel *PhysicalSelection) attach2Task(tasks ...task) task { +func (sel *PhysicalSelection) attach2Task(tasks ...Task) Task { if mppTask, _ := tasks[0].(*mppTask); mppTask != nil { // always push to mpp task. if expression.CanExprsPushDown(GetPushDownCtx(sel.SCtx()), sel.Conditions, kv.TiFlash) { - return attachPlan2Task(sel, mppTask.copy()) + return attachPlan2Task(sel, mppTask.Copy()) } } - t := tasks[0].convertToRootTask(sel.SCtx()) + t := tasks[0].ConvertToRootTask(sel.SCtx()) return attachPlan2Task(sel, t) } @@ -1927,8 +1866,8 @@ func computePartialCursorOffset(name string) int { return offset } -func (p *PhysicalStreamAgg) attach2Task(tasks ...task) task { - t := tasks[0].copy() +func (p *PhysicalStreamAgg) attach2Task(tasks ...Task) Task { + t := tasks[0].Copy() if cop, ok := t.(*copTask); ok { // We should not push agg down across // 1. double read, since the data of second read is ordered by handle instead of index. The `extraHandleCol` is added @@ -1937,7 +1876,7 @@ func (p *PhysicalStreamAgg) attach2Task(tasks ...task) task { // 2. the case that there's filters should be calculated on TiDB side. // 3. 
the case of index merge if (cop.indexPlan != nil && cop.tablePlan != nil && cop.keepOrder) || len(cop.rootTaskConds) > 0 || len(cop.idxMergePartPlans) > 0 { - t = cop.convertToRootTask(p.SCtx()) + t = cop.ConvertToRootTask(p.SCtx()) attachPlan2Task(p, t) } else { storeType := cop.getStoreType() @@ -1963,11 +1902,11 @@ func (p *PhysicalStreamAgg) attach2Task(tasks ...task) task { cop.indexPlan = partialAgg } } - t = cop.convertToRootTask(p.SCtx()) + t = cop.ConvertToRootTask(p.SCtx()) attachPlan2Task(finalAgg, t) } } else if mpp, ok := t.(*mppTask); ok { - t = mpp.convertToRootTask(p.SCtx()) + t = mpp.ConvertToRootTask(p.SCtx()) attachPlan2Task(p, t) } else { attachPlan2Task(p, t) @@ -1993,7 +1932,7 @@ func (p *PhysicalHashAgg) cpuCostDivisor(hasDistinct bool) (divisor, con float64 return math.Min(float64(finalCon), float64(partialCon)), float64(finalCon + partialCon) } -func (p *PhysicalHashAgg) attach2TaskForMpp1Phase(mpp *mppTask) task { +func (p *PhysicalHashAgg) attach2TaskForMpp1Phase(mpp *mppTask) Task { // 1-phase agg: when the partition columns can be satisfied, where the plan does not need to enforce Exchange // only push down the original agg proj := p.convertAvgForMPP() @@ -2303,8 +2242,8 @@ func (p *PhysicalHashAgg) adjust3StagePhaseAgg(partialAgg, finalAgg PhysicalPlan return finalHashAgg, middleHashAgg, partialHashAgg, proj4Partial, nil } -func (p *PhysicalHashAgg) attach2TaskForMpp(tasks ...task) task { - t := tasks[0].copy() +func (p *PhysicalHashAgg) attach2TaskForMpp(tasks ...Task) Task { + t := tasks[0].Copy() mpp, ok := t.(*mppTask) if !ok { return invalidTask @@ -2344,7 +2283,7 @@ func (p *PhysicalHashAgg) attach2TaskForMpp(tasks ...task) task { } prop := &property.PhysicalProperty{TaskTp: property.MppTaskType, ExpectedCnt: math.MaxFloat64, MPPPartitionTp: property.HashType, MPPPartitionCols: partitionCols} newMpp := mpp.enforceExchangerImpl(prop) - if newMpp.invalid() { + if newMpp.Invalid() { return newMpp } attachPlan2Task(finalAgg, newMpp) 
@@ -2358,7 +2297,7 @@ func (p *PhysicalHashAgg) attach2TaskForMpp(tasks ...task) task { if partialAgg != nil { attachPlan2Task(partialAgg, mpp) } - t = mpp.convertToRootTask(p.SCtx()) + t = mpp.ConvertToRootTask(p.SCtx()) attachPlan2Task(finalAgg, t) return t case MppScalar: @@ -2430,8 +2369,8 @@ func (p *PhysicalHashAgg) attach2TaskForMpp(tasks ...task) task { } } -func (p *PhysicalHashAgg) attach2Task(tasks ...task) task { - t := tasks[0].copy() +func (p *PhysicalHashAgg) attach2Task(tasks ...Task) Task { + t := tasks[0].Copy() if cop, ok := t.(*copTask); ok { if len(cop.rootTaskConds) == 0 && len(cop.idxMergePartPlans) == 0 { copTaskType := cop.getStoreType() @@ -2459,10 +2398,10 @@ func (p *PhysicalHashAgg) attach2Task(tasks ...task) task { // column may be independent of the column used for region distribution, so a closer // estimation of network cost for hash aggregation may multiply the number of // regions involved in the `partialAgg`, which is unknown however. - t = cop.convertToRootTask(p.SCtx()) + t = cop.ConvertToRootTask(p.SCtx()) attachPlan2Task(finalAgg, t) } else { - t = cop.convertToRootTask(p.SCtx()) + t = cop.ConvertToRootTask(p.SCtx()) attachPlan2Task(p, t) } } else if _, ok := t.(*mppTask); ok { @@ -2473,15 +2412,15 @@ func (p *PhysicalHashAgg) attach2Task(tasks ...task) task { return t } -func (p *PhysicalWindow) attach2TaskForMPP(mpp *mppTask) task { +func (p *PhysicalWindow) attach2TaskForMPP(mpp *mppTask) Task { // FIXME: currently, tiflash's join has different schema with TiDB, // so we have to rebuild the schema of join and operators which may inherit schema from join. // for window, we take the sub-plan's schema, and the schema generated by windowDescs. 
columns := p.Schema().Clone().Columns[len(p.Schema().Columns)-len(p.WindowFuncDescs):] - p.schema = expression.MergeSchema(mpp.plan().Schema(), expression.NewSchema(columns...)) + p.schema = expression.MergeSchema(mpp.Plan().Schema(), expression.NewSchema(columns...)) failpoint.Inject("CheckMPPWindowSchemaLength", func() { - if len(p.Schema().Columns) != len(mpp.plan().Schema().Columns)+len(p.WindowFuncDescs) { + if len(p.Schema().Columns) != len(mpp.Plan().Schema().Columns)+len(p.WindowFuncDescs) { panic("mpp physical window has incorrect schema length") } }) @@ -2489,18 +2428,18 @@ func (p *PhysicalWindow) attach2TaskForMPP(mpp *mppTask) task { return attachPlan2Task(p, mpp) } -func (p *PhysicalWindow) attach2Task(tasks ...task) task { - if mpp, ok := tasks[0].copy().(*mppTask); ok && p.storeTp == kv.TiFlash { +func (p *PhysicalWindow) attach2Task(tasks ...Task) Task { + if mpp, ok := tasks[0].Copy().(*mppTask); ok && p.storeTp == kv.TiFlash { return p.attach2TaskForMPP(mpp) } - t := tasks[0].convertToRootTask(p.SCtx()) + t := tasks[0].ConvertToRootTask(p.SCtx()) return attachPlan2Task(p.self, t) } -func (p *PhysicalCTEStorage) attach2Task(tasks ...task) task { - t := tasks[0].copy() +func (p *PhysicalCTEStorage) attach2Task(tasks ...Task) Task { + t := tasks[0].Copy() if mpp, ok := t.(*mppTask); ok { - p.SetChildren(t.plan()) + p.SetChildren(t.Plan()) return &mppTask{ p: p, partTp: mpp.partTp, @@ -2508,14 +2447,14 @@ func (p *PhysicalCTEStorage) attach2Task(tasks ...task) task { tblColHists: mpp.tblColHists, } } - t.convertToRootTask(p.SCtx()) - p.SetChildren(t.plan()) - return &rootTask{ - p: p, - } + t.ConvertToRootTask(p.SCtx()) + p.SetChildren(t.Plan()) + ta := &RootTask{} + ta.SetPlan(p) + return ta } -func (p *PhysicalSequence) attach2Task(tasks ...task) task { +func (p *PhysicalSequence) attach2Task(tasks ...Task) Task { for _, t := range tasks { _, isMpp := t.(*mppTask) if !isMpp { @@ -2527,7 +2466,7 @@ func (p *PhysicalSequence) attach2Task(tasks 
...task) task { children := make([]PhysicalPlan, 0, len(tasks)) for _, t := range tasks { - children = append(children, t.plan()) + children = append(children, t.Plan()) } p.SetChildren(children...) @@ -2566,25 +2505,25 @@ type mppTask struct { tblColHists *statistics.HistColl } -func (t *mppTask) count() float64 { +func (t *mppTask) Count() float64 { return t.p.StatsInfo().RowCount } -func (t *mppTask) copy() task { +func (t *mppTask) Copy() Task { nt := *t return &nt } -func (t *mppTask) plan() PhysicalPlan { +func (t *mppTask) Plan() PhysicalPlan { return t.p } -func (t *mppTask) invalid() bool { +func (t *mppTask) Invalid() bool { return t.p == nil } -func (t *mppTask) convertToRootTask(ctx PlanContext) *rootTask { - return t.copy().(*mppTask).convertToRootTaskImpl(ctx) +func (t *mppTask) ConvertToRootTask(ctx PlanContext) *RootTask { + return t.Copy().(*mppTask).ConvertToRootTaskImpl(ctx) } // MemoryUsage return the memory usage of mppTask @@ -2638,7 +2577,7 @@ func tryExpandVirtualColumn(p PhysicalPlan) { } } -func (t *mppTask) convertToRootTaskImpl(ctx PlanContext) *rootTask { +func (t *mppTask) ConvertToRootTaskImpl(ctx PlanContext) *RootTask { // In disaggregated-tiflash mode, need to consider generated column. 
tryExpandVirtualColumn(t.p) sender := PhysicalExchangeSender{ @@ -2652,9 +2591,8 @@ func (t *mppTask) convertToRootTaskImpl(ctx PlanContext) *rootTask { }.Init(ctx, t.p.QueryBlockOffset()) p.SetStats(t.p.StatsInfo()) collectPartitionInfosFromMPPPlan(p, t.p) - rt := &rootTask{ - p: p, - } + rt := &RootTask{} + rt.SetPlan(p) if len(t.rootTaskConds) > 0 { // Some Filter cannot be pushed down to TiFlash, need to add Selection in rootTask, @@ -2676,10 +2614,10 @@ func (t *mppTask) convertToRootTaskImpl(ctx PlanContext) *rootTask { logutil.BgLogger().Debug("calculate selectivity failed, use selection factor", zap.Error(err)) selectivity = SelectionFactor } - sel := PhysicalSelection{Conditions: t.rootTaskConds}.Init(ctx, rt.p.StatsInfo().Scale(selectivity), rt.p.QueryBlockOffset()) + sel := PhysicalSelection{Conditions: t.rootTaskConds}.Init(ctx, rt.GetPlan().StatsInfo().Scale(selectivity), rt.GetPlan().QueryBlockOffset()) sel.fromDataSource = true - sel.SetChildren(rt.p) - rt.p = sel + sel.SetChildren(rt.GetPlan()) + rt.SetPlan(sel) } return rt } @@ -2715,7 +2653,7 @@ func (t *mppTask) enforceExchanger(prop *property.PhysicalProperty) *mppTask { if !t.needEnforceExchanger(prop) { return t } - return t.copy().(*mppTask).enforceExchangerImpl(prop) + return t.Copy().(*mppTask).enforceExchangerImpl(prop) } func (t *mppTask) enforceExchangerImpl(prop *property.PhysicalProperty) *mppTask { diff --git a/pkg/planner/core/task_base.go b/pkg/planner/core/task_base.go new file mode 100644 index 0000000000000..fef9b6f38b171 --- /dev/null +++ b/pkg/planner/core/task_base.go @@ -0,0 +1,77 @@ +package core + +import ( + "github.com/pingcap/tidb/pkg/util/size" +) + +var ( + _ Task = &RootTask{} +) + +// Task is a new version of `PhysicalPlanInfo`. It stores cost information for a task. +// A task may be CopTask, RootTask, MPPTaskMeta or a ParallelTask. 
+type Task interface { + Count() float64 + Copy() Task + Plan() PhysicalPlan + Invalid() bool + ConvertToRootTask(ctx PlanContext) *RootTask + MemoryUsage() int64 +} + +// rootTask is the final sink node of a plan graph. It should be a single goroutine on tidb. +type RootTask struct { + p PhysicalPlan + isEmpty bool // isEmpty indicates if this task contains a dual table and returns empty data. + // TODO: The flag 'isEmpty' is only checked by Projection and UnionAll. We should support more cases in the future. +} + +func (t *RootTask) GetPlan() PhysicalPlan { + return t.p +} + +func (t *RootTask) SetPlan(p PhysicalPlan) { + t.p = p +} + +func (t *RootTask) IsEmpty() bool { + return t.isEmpty +} + +func (t *RootTask) SetEmpty(x bool) { + t.isEmpty = x +} + +func (t *RootTask) Copy() Task { + return &RootTask{ + p: t.p, + } +} + +func (t *RootTask) ConvertToRootTask(_ PlanContext) *RootTask { + return t.Copy().(*RootTask) +} + +func (t *RootTask) Invalid() bool { + return t.p == nil +} + +func (t *RootTask) Count() float64 { + return t.p.StatsInfo().RowCount +} + +func (t *RootTask) Plan() PhysicalPlan { + return t.p +} + +// MemoryUsage return the memory usage of rootTask +func (t *RootTask) MemoryUsage() (sum int64) { + if t == nil { + return + } + sum = size.SizeOfInterface + size.SizeOfBool + if t.p != nil { + sum += t.p.MemoryUsage() + } + return sum +} diff --git a/pkg/planner/core/util.go b/pkg/planner/core/util.go index 349cd9005abca..4653021b39a44 100644 --- a/pkg/planner/core/util.go +++ b/pkg/planner/core/util.go @@ -16,6 +16,7 @@ package core import ( "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "slices" "strings" @@ -25,7 +26,6 @@ import ( "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/core/internal/base" - "github.com/pingcap/tidb/pkg/planner/util" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/table" 
"github.com/pingcap/tidb/pkg/tablecodec" @@ -137,7 +137,7 @@ func (s *logicalSchemaProducer) setSchemaAndNames(schema *expression.Schema, nam } // inlineProjection prunes unneeded columns inline a executor. -func (s *logicalSchemaProducer) inlineProjection(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) { +func (s *logicalSchemaProducer) inlineProjection(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) { prunedColumns := make([]*expression.Column, 0) used := expression.GetUsedList(s.SCtx().GetExprCtx(), parentUsedCols, s.Schema()) for i := len(used) - 1; i >= 0; i-- { diff --git a/pkg/planner/util/coreusage/costMisc.go b/pkg/planner/util/coreusage/costMisc.go new file mode 100644 index 0000000000000..cfdb599878f0a --- /dev/null +++ b/pkg/planner/util/coreusage/costMisc.go @@ -0,0 +1,128 @@ +package coreusage + +import ( + "fmt" + "strconv" +) + +const ( + // CostFlagRecalculate indicates the optimizer to ignore cached cost and recalculate it again. + CostFlagRecalculate uint64 = 1 << iota + + // CostFlagUseTrueCardinality indicates the optimizer to use true cardinality to calculate the cost. + CostFlagUseTrueCardinality + + // CostFlagTrace indicates whether to trace the cost calculation. + CostFlagTrace +) + +type CostVer2 struct { + cost float64 + trace *CostTrace +} + +func (c *CostVer2) GetCost() float64 { + return c.cost +} + +func (c *CostVer2) GetTrace() *CostTrace { + return c.trace +} + +type CostTrace struct { + factorCosts map[string]float64 // map[factorName]cost, used to calibrate the cost model + formula string // It used to trace the cost calculation. 
+} + +func (c *CostTrace) GetFormula() string { + return c.formula +} + +func (c *CostTrace) GetFactorCosts() map[string]float64 { + return c.factorCosts +} + +func NewZeroCostVer2(trace bool) (ret CostVer2) { + if trace { + ret.trace = &CostTrace{make(map[string]float64), ""} + } + return +} + +func hasCostFlag(costFlag, flag uint64) bool { + return (costFlag & flag) > 0 +} + +func TraceCost(option *PlanCostOption) bool { + if option != nil && hasCostFlag(option.CostFlag, CostFlagTrace) { + return true + } + return false +} + +func NewCostVer2(option *PlanCostOption, factor CostVer2Factor, cost float64, lazyFormula func() string) (ret CostVer2) { + ret.cost = cost + if TraceCost(option) { + ret.trace = &CostTrace{make(map[string]float64), ""} + ret.trace.factorCosts[factor.Name] = cost + ret.trace.formula = lazyFormula() + } + return ret +} + +type CostVer2Factor struct { + Name string + Value float64 +} + +func (f CostVer2Factor) String() string { + return fmt.Sprintf("%s(%v)", f.Name, f.Value) +} + +func SumCostVer2(costs ...CostVer2) (ret CostVer2) { + if len(costs) == 0 { + return + } + for _, c := range costs { + ret.cost += c.cost + if c.trace != nil { + if ret.trace == nil { // init + ret.trace = &CostTrace{make(map[string]float64), ""} + } + for factor, factorCost := range c.trace.factorCosts { + ret.trace.factorCosts[factor] += factorCost + } + if ret.trace.formula != "" { + ret.trace.formula += " + " + } + ret.trace.formula += "(" + c.trace.formula + ")" + } + } + return ret +} + +func DivCostVer2(cost CostVer2, denominator float64) (ret CostVer2) { + ret.cost = cost.cost / denominator + if cost.trace != nil { + ret.trace = &CostTrace{make(map[string]float64), ""} + for f, c := range cost.trace.factorCosts { + ret.trace.factorCosts[f] = c / denominator + } + ret.trace.formula = "(" + cost.trace.formula + ")/" + strconv.FormatFloat(denominator, 'f', 2, 64) + } + return ret +} + +func MulCostVer2(cost CostVer2, scale float64) (ret CostVer2) { + ret.cost = 
cost.cost * scale + if cost.trace != nil { + ret.trace = &CostTrace{make(map[string]float64), ""} + for f, c := range cost.trace.factorCosts { + ret.trace.factorCosts[f] = c * scale + } + ret.trace.formula = "(" + cost.trace.formula + ")*" + strconv.FormatFloat(scale, 'f', 2, 64) + } + return ret +} + +var ZeroCostVer2 = NewZeroCostVer2(false) diff --git a/pkg/planner/util/optTracer.go b/pkg/planner/util/coreusage/optTracer.go similarity index 58% rename from pkg/planner/util/optTracer.go rename to pkg/planner/util/coreusage/optTracer.go index 79fa275f93ff8..f1df95cc1df37 100644 --- a/pkg/planner/util/optTracer.go +++ b/pkg/planner/util/coreusage/optTracer.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package util +package coreusage import "github.com/pingcap/tidb/pkg/util/tracing" @@ -20,6 +20,7 @@ import "github.com/pingcap/tidb/pkg/util/tracing" // logicalOptRule inside the accommodated pkg `util` should only be depended on by logical `rule` pkg. // // rule related -----> core/util +//**************************************** below logical optimize trace related ****************************************** // LogicalOptimizeOp is logical optimizing option for tracing. type LogicalOptimizeOp struct { @@ -66,3 +67,64 @@ func (op *LogicalOptimizeOp) RecordFinalLogicalPlan(build func() *tracing.PlanTr } op.tracer.RecordFinalLogicalPlan(build()) } + +//**************************************** below physical optimize trace related ****************************************** + +// PhysicalOptimizeOp is logical optimizing option for tracing. 
+type PhysicalOptimizeOp struct { + // tracer is goring to track optimize steps during physical optimizing + tracer *tracing.PhysicalOptimizeTracer +} + +func DefaultPhysicalOptimizeOption() *PhysicalOptimizeOp { + return &PhysicalOptimizeOp{} +} + +func (op *PhysicalOptimizeOp) WithEnableOptimizeTracer(tracer *tracing.PhysicalOptimizeTracer) *PhysicalOptimizeOp { + op.tracer = tracer + return op +} + +func (op *PhysicalOptimizeOp) AppendCandidate(c *tracing.CandidatePlanTrace) { + op.tracer.AppendCandidate(c) +} + +func (op *PhysicalOptimizeOp) GetTracer() *tracing.PhysicalOptimizeTracer { + return op.tracer +} + +// NewDefaultPlanCostOption returns PlanCostOption +func NewDefaultPlanCostOption() *PlanCostOption { + return &PlanCostOption{} +} + +// PlanCostOption indicates option during GetPlanCost +type PlanCostOption struct { + CostFlag uint64 + tracer *PhysicalOptimizeOp +} + +func (op *PlanCostOption) GetTracer() *PhysicalOptimizeOp { + return op.tracer +} + +// WithCostFlag set cost flag +func (op *PlanCostOption) WithCostFlag(flag uint64) *PlanCostOption { + if op == nil { + return nil + } + op.CostFlag = flag + return op +} + +// WithOptimizeTracer set tracer +func (op *PlanCostOption) WithOptimizeTracer(v *PhysicalOptimizeOp) *PlanCostOption { + if op == nil { + return nil + } + op.tracer = v + if v != nil && v.tracer != nil { + op.CostFlag |= CostFlagTrace + } + return op +}