010-dmaengine-Add-transfer-termination-synchronization-s.patch 4.1 KB

  1. From 7bd903c5ca47fde5ad52370a47776491813c772e Mon Sep 17 00:00:00 2001
  2. From: Peter Ujfalusi <peter.ujfalusi@ti.com>
  3. Date: Mon, 14 Dec 2015 22:47:39 +0200
  4. Subject: [PATCH 1/3] dmaengine: core: Move and merge the code paths using
  5. private_candidate
  6. Channel matching with private_candidate() is used in two paths, the error
  7. checking is slightly different in them and they are duplicating code also.
  8. Move the code under find_candidate() to provide consistent execution and
  9. going to allow us to reuse this mode of channel lookup later.
  10. Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
  11. Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
  12. Reviewed-by: Arnd Bergmann <arnd@arndb.de>
  13. Signed-off-by: Vinod Koul <vinod.koul@intel.com>
  14. ---
  15. drivers/dma/dmaengine.c | 81 +++++++++++++++++++++++++------------------------
  16. 1 file changed, 42 insertions(+), 39 deletions(-)
  17. --- a/drivers/dma/dmaengine.c
  18. +++ b/drivers/dma/dmaengine.c
  19. @@ -542,6 +542,42 @@ static struct dma_chan *private_candidat
  20. return NULL;
  21. }
  22. +static struct dma_chan *find_candidate(struct dma_device *device,
  23. + const dma_cap_mask_t *mask,
  24. + dma_filter_fn fn, void *fn_param)
  25. +{
  26. + struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
  27. + int err;
  28. +
  29. + if (chan) {
  30. + /* Found a suitable channel, try to grab, prep, and return it.
  31. + * We first set DMA_PRIVATE to disable balance_ref_count as this
  32. + * channel will not be published in the general-purpose
  33. + * allocator
  34. + */
  35. + dma_cap_set(DMA_PRIVATE, device->cap_mask);
  36. + device->privatecnt++;
  37. + err = dma_chan_get(chan);
  38. +
  39. + if (err) {
  40. + if (err == -ENODEV) {
  41. + pr_debug("%s: %s module removed\n", __func__,
  42. + dma_chan_name(chan));
  43. + list_del_rcu(&device->global_node);
  44. + } else
  45. + pr_debug("%s: failed to get %s: (%d)\n",
  46. + __func__, dma_chan_name(chan), err);
  47. +
  48. + if (--device->privatecnt == 0)
  49. + dma_cap_clear(DMA_PRIVATE, device->cap_mask);
  50. +
  51. + chan = ERR_PTR(err);
  52. + }
  53. + }
  54. +
  55. + return chan ? chan : ERR_PTR(-EPROBE_DEFER);
  56. +}
  57. +
  58. /**
  59. * dma_get_slave_channel - try to get specific channel exclusively
  60. * @chan: target channel
  61. @@ -580,7 +616,6 @@ struct dma_chan *dma_get_any_slave_chann
  62. {
  63. dma_cap_mask_t mask;
  64. struct dma_chan *chan;
  65. - int err;
  66. dma_cap_zero(mask);
  67. dma_cap_set(DMA_SLAVE, mask);
  68. @@ -588,23 +623,11 @@ struct dma_chan *dma_get_any_slave_chann
  69. /* lock against __dma_request_channel */
  70. mutex_lock(&dma_list_mutex);
  71. - chan = private_candidate(&mask, device, NULL, NULL);
  72. - if (chan) {
  73. - dma_cap_set(DMA_PRIVATE, device->cap_mask);
  74. - device->privatecnt++;
  75. - err = dma_chan_get(chan);
  76. - if (err) {
  77. - pr_debug("%s: failed to get %s: (%d)\n",
  78. - __func__, dma_chan_name(chan), err);
  79. - chan = NULL;
  80. - if (--device->privatecnt == 0)
  81. - dma_cap_clear(DMA_PRIVATE, device->cap_mask);
  82. - }
  83. - }
  84. + chan = find_candidate(device, &mask, NULL, NULL);
  85. mutex_unlock(&dma_list_mutex);
  86. - return chan;
  87. + return IS_ERR(chan) ? NULL : chan;
  88. }
  89. EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
  90. @@ -621,35 +644,15 @@ struct dma_chan *__dma_request_channel(c
  91. {
  92. struct dma_device *device, *_d;
  93. struct dma_chan *chan = NULL;
  94. - int err;
  95. /* Find a channel */
  96. mutex_lock(&dma_list_mutex);
  97. list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
  98. - chan = private_candidate(mask, device, fn, fn_param);
  99. - if (chan) {
  100. - /* Found a suitable channel, try to grab, prep, and
  101. - * return it. We first set DMA_PRIVATE to disable
  102. - * balance_ref_count as this channel will not be
  103. - * published in the general-purpose allocator
  104. - */
  105. - dma_cap_set(DMA_PRIVATE, device->cap_mask);
  106. - device->privatecnt++;
  107. - err = dma_chan_get(chan);
  108. + chan = find_candidate(device, mask, fn, fn_param);
  109. + if (!IS_ERR(chan))
  110. + break;
  111. - if (err == -ENODEV) {
  112. - pr_debug("%s: %s module removed\n",
  113. - __func__, dma_chan_name(chan));
  114. - list_del_rcu(&device->global_node);
  115. - } else if (err)
  116. - pr_debug("%s: failed to get %s: (%d)\n",
  117. - __func__, dma_chan_name(chan), err);
  118. - else
  119. - break;
  120. - if (--device->privatecnt == 0)
  121. - dma_cap_clear(DMA_PRIVATE, device->cap_mask);
  122. - chan = NULL;
  123. - }
  124. + chan = NULL;
  125. }
  126. mutex_unlock(&dma_list_mutex);