From 0c8e3cfdca3c8cb46b77475e2dcb091fab245f20 Mon Sep 17 00:00:00 2001 From: Ruben Rodriguez <ruben@trisquel.info> Date: Wed, 30 Aug 2017 11:46:56 -0400 Subject: [PATCH] Updated make-linux-hwe for 4.10 --- ...nfig-build-bits-for-BFQ-v7r11-4.10..patch} | 12 +- ...-the-BFQ-v7r11-I-O-sched-for-4.10.0.patch} | 7 +- ...ly-Queue-Merge-EQM-to-BFQ-v7r11-for.patch} | 8 +- ...or-4.10.0-into-BFQ-v8r11-for-4.10.0.patch} | 3332 +++++++++++++---- .../linux-hwe/{deblob-4.8 => deblob-4.10} | 163 +- helpers/DATA/linux-hwe/deblob-check | 167 +- helpers/DATA/linux-hwe/deblob-main | 4 +- .../linux-hwe/silent-accept-firmware.patch | 483 ++- helpers/make-linux-hwe | 4 +- 9 files changed, 3149 insertions(+), 1031 deletions(-) rename helpers/DATA/linux-hwe/{0001-block-cgroups-kconfig-build-bits-for-BFQ-v7r11-4.8.0.patch => 0001-block-cgroups-kconfig-build-bits-for-BFQ-v7r11-4.10..patch} (94%) rename helpers/DATA/linux-hwe/{0002-block-introduce-the-BFQ-v7r11-I-O-sched-to-be-ported.patch => 0002-block-introduce-the-BFQ-v7r11-I-O-sched-for-4.10.0.patch} (99%) rename helpers/DATA/linux-hwe/{0003-block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r11-to-.patch => 0003-block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r11-for.patch} (99%) rename helpers/DATA/linux-hwe/{0004-Turn-BFQ-v7r11-into-BFQ-v8r4-for-4.8.0.patch => 0004-Turn-BFQ-v7r11-for-4.10.0-into-BFQ-v8r11-for-4.10.0.patch} (72%) rename helpers/DATA/linux-hwe/{deblob-4.8 => deblob-4.10} (96%) diff --git a/helpers/DATA/linux-hwe/0001-block-cgroups-kconfig-build-bits-for-BFQ-v7r11-4.8.0.patch b/helpers/DATA/linux-hwe/0001-block-cgroups-kconfig-build-bits-for-BFQ-v7r11-4.10..patch similarity index 94% rename from helpers/DATA/linux-hwe/0001-block-cgroups-kconfig-build-bits-for-BFQ-v7r11-4.8.0.patch rename to helpers/DATA/linux-hwe/0001-block-cgroups-kconfig-build-bits-for-BFQ-v7r11-4.10..patch index 35cd1cef..45f4fd2e 100644 --- a/helpers/DATA/linux-hwe/0001-block-cgroups-kconfig-build-bits-for-BFQ-v7r11-4.8.0.patch +++ b/helpers/DATA/linux-hwe/0001-block-cgroups-kconfig-build-bits-for-BFQ-v7r11-4.10..patch @@ -1,7 +1,7 @@ -From f2ebe596e7d72e96e0fb2be87be90f0b96e6f1b3 Mon Sep 17 00:00:00 2001 +From 8500f47272575b4616beb487c483019248d8c501 Mon Sep 17 00:00:00 2001 From: Paolo Valente <paolo.valente@unimore.it> Date: Tue, 7 Apr 2015 13:39:12 +0200 -Subject: [PATCH 1/4] block: cgroups, kconfig, build bits for BFQ-v7r11-4.8.0 +Subject: [PATCH 1/4] block: cgroups, kconfig, build bits for BFQ-v7r11-4.10.0 Update Kconfig.iosched and do the related Makefile changes to include kernel configuration options for BFQ. Also increase the number of @@ -74,7 +74,7 @@ index 421bef9..0ee5f0f 100644 endmenu diff --git a/block/Makefile b/block/Makefile -index 9eda232..4a36683 100644 +index a827f98..3b14703 100644 --- a/block/Makefile +++ b/block/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o @@ -86,10 +86,10 @@ index 9eda232..4a36683 100644 obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index e79055c..931ff1e 100644 +index 1ca8e8f..8e2d6ed 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h -@@ -45,7 +45,7 @@ struct pr_ops; +@@ -47,7 +47,7 @@ struct rq_wb; * Maximum number of blkcg policies allowed to be registered concurrently. * Defined here to simplify include dependency. 
*/ @@ -99,5 +99,5 @@ index e79055c..931ff1e 100644 typedef void (rq_end_io_fn)(struct request *, int); -- -2.7.4 (Apple Git-66) +2.10.0 diff --git a/helpers/DATA/linux-hwe/0002-block-introduce-the-BFQ-v7r11-I-O-sched-to-be-ported.patch b/helpers/DATA/linux-hwe/0002-block-introduce-the-BFQ-v7r11-I-O-sched-for-4.10.0.patch similarity index 99% rename from helpers/DATA/linux-hwe/0002-block-introduce-the-BFQ-v7r11-I-O-sched-to-be-ported.patch rename to helpers/DATA/linux-hwe/0002-block-introduce-the-BFQ-v7r11-I-O-sched-for-4.10.0.patch index 7cc8ce1c..0812a579 100644 --- a/helpers/DATA/linux-hwe/0002-block-introduce-the-BFQ-v7r11-I-O-sched-to-be-ported.patch +++ b/helpers/DATA/linux-hwe/0002-block-introduce-the-BFQ-v7r11-I-O-sched-for-4.10.0.patch @@ -1,8 +1,7 @@ -From d9af6fcc4167cbb8433b10bbf3663c8297487f52 Mon Sep 17 00:00:00 2001 +From 2f56e91506b329ffc29d0f184924ad0123c9ba9e Mon Sep 17 00:00:00 2001 From: Paolo Valente <paolo.valente@unimore.it> Date: Thu, 9 May 2013 19:10:02 +0200 -Subject: [PATCH 2/4] block: introduce the BFQ-v7r11 I/O sched, to be ported to - 4.8.0 +Subject: [PATCH 2/4] block: introduce the BFQ-v7r11 I/O sched for 4.10.0 The general structure is borrowed from CFQ, as much of the code for handling I/O contexts. Over time, several useful features have been @@ -7106,5 +7105,5 @@ index 0000000..2bf54ae + +#endif /* _BFQ_H */ -- -2.7.4 (Apple Git-66) +2.10.0 diff --git a/helpers/DATA/linux-hwe/0003-block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r11-to-.patch b/helpers/DATA/linux-hwe/0003-block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r11-for.patch similarity index 99% rename from helpers/DATA/linux-hwe/0003-block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r11-to-.patch rename to helpers/DATA/linux-hwe/0003-block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r11-for.patch index 2a53175b..28eeb1f7 100644 --- a/helpers/DATA/linux-hwe/0003-block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r11-to-.patch +++ b/helpers/DATA/linux-hwe/0003-block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r11-for.patch @@ -1,8 +1,8 @@ -From 409e62551360d2802992b0175062237352793a2a Mon Sep 17 00:00:00 2001 +From e4d9bed2dfdec562b23491e44602c89c4a2a5ea4 Mon Sep 17 00:00:00 2001 From: Mauro Andreolini <mauro.andreolini@unimore.it> Date: Sun, 6 Sep 2015 16:09:05 +0200 -Subject: [PATCH 3/4] block, bfq: add Early Queue Merge (EQM) to BFQ-v7r11, to - port to 4.8.0 +Subject: [PATCH 3/4] block, bfq: add Early Queue Merge (EQM) to BFQ-v7r11 for + 4.10.0 A set of processes may happen to perform interleaved reads, i.e.,requests whose union would give rise to a sequential read pattern. 
There are two @@ -1097,5 +1097,5 @@ index 2bf54ae..fcce855 100644 static void bfq_put_queue(struct bfq_queue *bfqq); static void bfq_dispatch_insert(struct request_queue *q, struct request *rq); -- -2.7.4 (Apple Git-66) +2.10.0 diff --git a/helpers/DATA/linux-hwe/0004-Turn-BFQ-v7r11-into-BFQ-v8r4-for-4.8.0.patch b/helpers/DATA/linux-hwe/0004-Turn-BFQ-v7r11-for-4.10.0-into-BFQ-v8r11-for-4.10.0.patch similarity index 72% rename from helpers/DATA/linux-hwe/0004-Turn-BFQ-v7r11-into-BFQ-v8r4-for-4.8.0.patch rename to helpers/DATA/linux-hwe/0004-Turn-BFQ-v7r11-for-4.10.0-into-BFQ-v8r11-for-4.10.0.patch index 62cdd1aa..86ff3b63 100644 --- a/helpers/DATA/linux-hwe/0004-Turn-BFQ-v7r11-into-BFQ-v8r4-for-4.8.0.patch +++ b/helpers/DATA/linux-hwe/0004-Turn-BFQ-v7r11-for-4.10.0-into-BFQ-v8r11-for-4.10.0.patch @@ -1,22 +1,588 @@ -From ec8981e245dfe24bc6a80207e832ca9be18fd39d Mon Sep 17 00:00:00 2001 +From a97fff52b333556bc4f2c990b4548667b4ac8af1 Mon Sep 17 00:00:00 2001 From: Paolo Valente <paolo.valente@linaro.org> -Date: Tue, 17 May 2016 08:28:04 +0200 -Subject: [PATCH 4/4] Turn BFQ-v7r11 into BFQ-v8r4 for 4.8.0 +Date: Mon, 16 May 2016 11:16:17 +0200 +Subject: [PATCH 4/4] Turn BFQ-v7r11 for 4.10.0 into BFQ-v8r11 for 4.10.0 Signed-off-by: Paolo Valente <paolo.valente@linaro.org> --- - block/Kconfig.iosched | 2 +- - block/bfq-cgroup.c | 495 ++++---- - block/bfq-iosched.c | 3230 +++++++++++++++++++++++++++++++------------------ - block/bfq-sched.c | 480 ++++++-- - block/bfq.h | 747 ++++++------ - 5 files changed, 3073 insertions(+), 1881 deletions(-) + Documentation/block/00-INDEX | 2 + + Documentation/block/bfq-iosched.txt | 530 ++++++ + block/Kconfig.iosched | 18 +- + block/bfq-cgroup.c | 511 +++--- + block/bfq-iosched.c | 3448 ++++++++++++++++++++++------------- + block/bfq-sched.c | 1344 +++++++++++--- + block/bfq.h | 800 ++++---- + 7 files changed, 4467 insertions(+), 2186 deletions(-) + create mode 100644 Documentation/block/bfq-iosched.txt +diff --git a/Documentation/block/00-INDEX b/Documentation/block/00-INDEX +index e55103a..8d55b4b 100644 +--- a/Documentation/block/00-INDEX ++++ b/Documentation/block/00-INDEX +@@ -1,5 +1,7 @@ + 00-INDEX + - This file ++bfq-iosched.txt ++ - BFQ IO scheduler and its tunables + biodoc.txt + - Notes on the Generic Block Layer Rewrite in Linux 2.5 + biovecs.txt +diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt +new file mode 100644 +index 0000000..13b5248 +--- /dev/null ++++ b/Documentation/block/bfq-iosched.txt +@@ -0,0 +1,530 @@ ++BFQ (Budget Fair Queueing) ++========================== ++ ++BFQ is a proportional-share I/O scheduler, with some extra ++low-latency capabilities. In addition to cgroups support (blkio or io ++controllers), BFQ's main features are: ++- BFQ guarantees a high system and application responsiveness, and a ++ low latency for time-sensitive applications, such as audio or video ++ players; ++- BFQ distributes bandwidth, and not just time, among processes or ++ groups (switching back to time distribution when needed to keep ++ throughput high). ++ ++On average CPUs, the current version of BFQ can handle devices ++performing at most ~30K IOPS; at most ~50 KIOPS on faster CPUs. As a ++reference, 30-50 KIOPS correspond to very high bandwidths with ++sequential I/O (e.g., 8-12 GB/s if I/O requests are 256 KB large), and ++to 120-200 MB/s with 4KB random I/O. ++ ++The table of contents follow. Impatients can just jump to Section 3. ++ ++CONTENTS ++ ++1. When may BFQ be useful? 
++ 1-1 Personal systems ++ 1-2 Server systems ++2. How does BFQ work? ++3. What are BFQ's tunable? ++4. BFQ group scheduling ++ 4-1 Service guarantees provided ++ 4-2 Interface ++ ++1. When may BFQ be useful? ++========================== ++ ++BFQ provides the following benefits on personal and server systems. ++ ++1-1 Personal systems ++-------------------- ++ ++Low latency for interactive applications ++ ++Regardless of the actual background workload, BFQ guarantees that, for ++interactive tasks, the storage device is virtually as responsive as if ++it was idle. For example, even if one or more of the following ++background workloads are being executed: ++- one or more large files are being read, written or copied, ++- a tree of source files is being compiled, ++- one or more virtual machines are performing I/O, ++- a software update is in progress, ++- indexing daemons are scanning filesystems and updating their ++ databases, ++starting an application or loading a file from within an application ++takes about the same time as if the storage device was idle. As a ++comparison, with CFQ, NOOP or DEADLINE, and in the same conditions, ++applications experience high latencies, or even become unresponsive ++until the background workload terminates (also on SSDs). ++ ++Low latency for soft real-time applications ++ ++Also soft real-time applications, such as audio and video ++players/streamers, enjoy a low latency and a low drop rate, regardless ++of the background I/O workload. As a consequence, these applications ++do not suffer from almost any glitch due to the background workload. ++ ++Higher speed for code-development tasks ++ ++If some additional workload happens to be executed in parallel, then ++BFQ executes the I/O-related components of typical code-development ++tasks (compilation, checkout, merge, ...) much more quickly than CFQ, ++NOOP or DEADLINE. ++ ++High throughput ++ ++On hard disks, BFQ achieves up to 30% higher throughput than CFQ, and ++up to 150% higher throughput than DEADLINE and NOOP, with all the ++sequential workloads considered in our tests. With random workloads, ++and with all the workloads on flash-based devices, BFQ achieves, ++instead, about the same throughput as the other schedulers. ++ ++Strong fairness, bandwidth and delay guarantees ++ ++BFQ distributes the device throughput, and not just the device time, ++among I/O-bound applications in proportion their weights, with any ++workload and regardless of the device parameters. From these bandwidth ++guarantees, it is possible to compute tight per-I/O-request delay ++guarantees by a simple formula. If not configured for strict service ++guarantees, BFQ switches to time-based resource sharing (only) for ++applications that would otherwise cause a throughput loss. ++ ++1-2 Server systems ++------------------ ++ ++Most benefits for server systems follow from the same service ++properties as above. In particular, regardless of whether additional, ++possibly heavy workloads are being served, BFQ guarantees: ++ ++. audio and video-streaming with zero or very low jitter and drop ++ rate; ++ ++. fast retrieval of WEB pages and embedded objects; ++ ++. real-time recording of data in live-dumping applications (e.g., ++ packet logging); ++ ++. responsiveness in local and remote access to a server. ++ ++ ++2. How does BFQ work? ++===================== ++ ++BFQ is a proportional-share I/O scheduler, whose general structure, ++plus a lot of code, are borrowed from CFQ. 
++ ++- Each process doing I/O on a device is associated with a weight and a ++ (bfq_)queue. ++ ++- BFQ grants exclusive access to the device, for a while, to one queue ++ (process) at a time, and implements this service model by ++ associating every queue with a budget, measured in number of ++ sectors. ++ ++ - After a queue is granted access to the device, the budget of the ++ queue is decremented, on each request dispatch, by the size of the ++ request. ++ ++ - The in-service queue is expired, i.e., its service is suspended, ++ only if one of the following events occurs: 1) the queue finishes ++ its budget, 2) the queue empties, 3) a "budget timeout" fires. ++ ++ - The budget timeout prevents processes doing random I/O from ++ holding the device for too long and dramatically reducing ++ throughput. ++ ++ - Actually, as in CFQ, a queue associated with a process issuing ++ sync requests may not be expired immediately when it empties. In ++ contrast, BFQ may idle the device for a short time interval, ++ giving the process the chance to go on being served if it issues ++ a new request in time. Device idling typically boosts the ++ throughput on rotational devices, if processes do synchronous ++ and sequential I/O. In addition, under BFQ, device idling is ++ also instrumental in guaranteeing the desired throughput ++ fraction to processes issuing sync requests (see the description ++ of the slice_idle tunable in this document, or [1, 2], for more ++ details). ++ ++ - With respect to idling for service guarantees, if several ++ processes are competing for the device at the same time, but ++ all processes (and groups, after the following commit) have ++ the same weight, then BFQ guarantees the expected throughput ++ distribution without ever idling the device. Throughput is ++ thus as high as possible in this common scenario. ++ ++ - If low-latency mode is enabled (default configuration), BFQ ++ executes some special heuristics to detect interactive and soft ++ real-time applications (e.g., video or audio players/streamers), ++ and to reduce their latency. The most important action taken to ++ achieve this goal is to give to the queues associated with these ++ applications more than their fair share of the device ++ throughput. For brevity, we call just "weight-raising" the whole ++ sets of actions taken by BFQ to privilege these queues. In ++ particular, BFQ provides a milder form of weight-raising for ++ interactive applications, and a stronger form for soft real-time ++ applications. ++ ++ - BFQ automatically deactivates idling for queues born in a burst of ++ queue creations. In fact, these queues are usually associated with ++ the processes of applications and services that benefit mostly ++ from a high throughput. Examples are systemd during boot, or git ++ grep. ++ ++ - As CFQ, BFQ merges queues performing interleaved I/O, i.e., ++ performing random I/O that becomes mostly sequential if ++ merged. Differently from CFQ, BFQ achieves this goal with a more ++ reactive mechanism, called Early Queue Merge (EQM). EQM is so ++ responsive in detecting interleaved I/O (cooperating processes), ++ that it enables BFQ to achieve a high throughput, by queue ++ merging, even for queues for which CFQ needs a different ++ mechanism, preemption, to get a high throughput. As such EQM is a ++ unified mechanism to achieve a high throughput with interleaved ++ I/O. 
++ ++ - Queues are scheduled according to a variant of WF2Q+, named ++ B-WF2Q+, and implemented using an augmented rb-tree to preserve an ++ O(log N) overall complexity. See [2] for more details. B-WF2Q+ is ++ also ready for hierarchical scheduling. However, for a cleaner ++ logical breakdown, the code that enables and completes ++ hierarchical support is provided in the next commit, which focuses ++ exactly on this feature. ++ ++ - B-WF2Q+ guarantees a tight deviation with respect to an ideal, ++ perfectly fair, and smooth service. In particular, B-WF2Q+ ++ guarantees that each queue receives a fraction of the device ++ throughput proportional to its weight, even if the throughput ++ fluctuates, and regardless of: the device parameters, the current ++ workload and the budgets assigned to the queue. ++ ++ - The last, budget-independence, property (although probably ++ counterintuitive in the first place) is definitely beneficial, for ++ the following reasons: ++ ++ - First, with any proportional-share scheduler, the maximum ++ deviation with respect to an ideal service is proportional to ++ the maximum budget (slice) assigned to queues. As a consequence, ++ BFQ can keep this deviation tight not only because of the ++ accurate service of B-WF2Q+, but also because BFQ *does not* ++ need to assign a larger budget to a queue to let the queue ++ receive a higher fraction of the device throughput. ++ ++ - Second, BFQ is free to choose, for every process (queue), the ++ budget that best fits the needs of the process, or best ++ leverages the I/O pattern of the process. In particular, BFQ ++ updates queue budgets with a simple feedback-loop algorithm that ++ allows a high throughput to be achieved, while still providing ++ tight latency guarantees to time-sensitive applications. When ++ the in-service queue expires, this algorithm computes the next ++ budget of the queue so as to: ++ ++ - Let large budgets be eventually assigned to the queues ++ associated with I/O-bound applications performing sequential ++ I/O: in fact, the longer these applications are served once ++ got access to the device, the higher the throughput is. ++ ++ - Let small budgets be eventually assigned to the queues ++ associated with time-sensitive applications (which typically ++ perform sporadic and short I/O), because, the smaller the ++ budget assigned to a queue waiting for service is, the sooner ++ B-WF2Q+ will serve that queue (Subsec 3.3 in [2]). ++ ++- If several processes are competing for the device at the same time, ++ but all processes and groups have the same weight, then BFQ ++ guarantees the expected throughput distribution without ever idling ++ the device. It uses preemption instead. Throughput is then much ++ higher in this common scenario. ++ ++- ioprio classes are served in strict priority order, i.e., ++ lower-priority queues are not served as long as there are ++ higher-priority queues. Among queues in the same class, the ++ bandwidth is distributed in proportion to the weight of each ++ queue. A very thin extra bandwidth is however guaranteed to ++ the Idle class, to prevent it from starving. ++ ++ ++3. What are BFQ's tunable? ++========================== ++ ++The tunables back_seek-max, back_seek_penalty, fifo_expire_async and ++fifo_expire_sync below are the same as in CFQ. Their description is ++just copied from that for CFQ. Some considerations in the description ++of slice_idle are copied from CFQ too. 
++ ++per-process ioprio and weight ++----------------------------- ++ ++Unless the cgroups interface is used (see "4. BFQ group scheduling"), ++weights can be assigned to processes only indirectly, through I/O ++priorities, and according to the relation: ++weight = (IOPRIO_BE_NR - ioprio) * 10. ++ ++Beware that, if low-latency is set, then BFQ automatically raises the ++weight of the queues associated with interactive and soft real-time ++applications. Unset this tunable if you need/want to control weights. ++ ++slice_idle ++---------- ++ ++This parameter specifies how long BFQ should idle for next I/O ++request, when certain sync BFQ queues become empty. By default ++slice_idle is a non-zero value. Idling has a double purpose: boosting ++throughput and making sure that the desired throughput distribution is ++respected (see the description of how BFQ works, and, if needed, the ++papers referred there). ++ ++As for throughput, idling can be very helpful on highly seeky media ++like single spindle SATA/SAS disks where we can cut down on overall ++number of seeks and see improved throughput. ++ ++Setting slice_idle to 0 will remove all the idling on queues and one ++should see an overall improved throughput on faster storage devices ++like multiple SATA/SAS disks in hardware RAID configuration. ++ ++So depending on storage and workload, it might be useful to set ++slice_idle=0. In general for SATA/SAS disks and software RAID of ++SATA/SAS disks keeping slice_idle enabled should be useful. For any ++configurations where there are multiple spindles behind single LUN ++(Host based hardware RAID controller or for storage arrays), setting ++slice_idle=0 might end up in better throughput and acceptable ++latencies. ++ ++Idling is however necessary to have service guarantees enforced in ++case of differentiated weights or differentiated I/O-request lengths. ++To see why, suppose that a given BFQ queue A must get several I/O ++requests served for each request served for another queue B. Idling ++ensures that, if A makes a new I/O request slightly after becoming ++empty, then no request of B is dispatched in the middle, and thus A ++does not lose the possibility to get more than one request dispatched ++before the next request of B is dispatched. Note that idling ++guarantees the desired differentiated treatment of queues only in ++terms of I/O-request dispatches. To guarantee that the actual service ++order then corresponds to the dispatch order, the strict_guarantees ++tunable must be set too. ++ ++There is an important flipside for idling: apart from the above cases ++where it is beneficial also for throughput, idling can severely impact ++throughput. One important case is random workload. Because of this ++issue, BFQ tends to avoid idling as much as possible, when it is not ++beneficial also for throughput. As a consequence of this behavior, and ++of further issues described for the strict_guarantees tunable, ++short-term service guarantees may be occasionally violated. And, in ++some cases, these guarantees may be more important than guaranteeing ++maximum throughput. For example, in video playing/streaming, a very ++low drop rate may be more important than maximum throughput. In these ++cases, consider setting the strict_guarantees parameter. 
++ ++strict_guarantees ++----------------- ++ ++If this parameter is set (default: unset), then BFQ ++ ++- always performs idling when the in-service queue becomes empty; ++ ++- forces the device to serve one I/O request at a time, by dispatching a ++ new request only if there is no outstanding request. ++ ++In the presence of differentiated weights or I/O-request sizes, both ++the above conditions are needed to guarantee that every BFQ queue ++receives its allotted share of the bandwidth. The first condition is ++needed for the reasons explained in the description of the slice_idle ++tunable. The second condition is needed because all modern storage ++devices reorder internally-queued requests, which may trivially break ++the service guarantees enforced by the I/O scheduler. ++ ++Setting strict_guarantees may evidently affect throughput. ++ ++back_seek_max ++------------- ++ ++This specifies, given in Kbytes, the maximum "distance" for backward seeking. ++The distance is the amount of space from the current head location to the ++sectors that are backward in terms of distance. ++ ++This parameter allows the scheduler to anticipate requests in the "backward" ++direction and consider them as being the "next" if they are within this ++distance from the current head location. ++ ++back_seek_penalty ++----------------- ++ ++This parameter is used to compute the cost of backward seeking. If the ++backward distance of request is just 1/back_seek_penalty from a "front" ++request, then the seeking cost of two requests is considered equivalent. ++ ++So scheduler will not bias toward one or the other request (otherwise scheduler ++will bias toward front request). Default value of back_seek_penalty is 2. ++ ++fifo_expire_async ++----------------- ++ ++This parameter is used to set the timeout of asynchronous requests. Default ++value of this is 248ms. ++ ++fifo_expire_sync ++---------------- ++ ++This parameter is used to set the timeout of synchronous requests. Default ++value of this is 124ms. In case to favor synchronous requests over asynchronous ++one, this value should be decreased relative to fifo_expire_async. ++ ++low_latency ++----------- ++ ++This parameter is used to enable/disable BFQ's low latency mode. By ++default, low latency mode is enabled. If enabled, interactive and soft ++real-time applications are privileged and experience a lower latency, ++as explained in more detail in the description of how BFQ works. ++ ++DO NOT enable this mode if you need full control on bandwidth ++distribution. In fact, if it is enabled, then BFQ automatically ++increases the bandwidth share of privileged applications, as the main ++means to guarantee a lower latency to them. ++ ++timeout_sync ++------------ ++ ++Maximum amount of device time that can be given to a task (queue) once ++it has been selected for service. On devices with costly seeks, ++increasing this time usually increases maximum throughput. On the ++opposite end, increasing this time coarsens the granularity of the ++short-term bandwidth and latency guarantees, especially if the ++following parameter is set to zero. ++ ++max_budget ++---------- ++ ++Maximum amount of service, measured in sectors, that can be provided ++to a BFQ queue once it is set in service (of course within the limits ++of the above timeout). According to what said in the description of ++the algorithm, larger values increase the throughput in proportion to ++the percentage of sequential I/O requests issued. 
The price of larger ++values is that they coarsen the granularity of short-term bandwidth ++and latency guarantees. ++ ++The default value is 0, which enables auto-tuning: BFQ sets max_budget ++to the maximum number of sectors that can be served during ++timeout_sync, according to the estimated peak rate. ++ ++weights ++------- ++ ++Read-only parameter, used to show the weights of the currently active ++BFQ queues. ++ ++ ++wr_ tunables ++------------ ++ ++BFQ exports a few parameters to control/tune the behavior of ++low-latency heuristics. ++ ++wr_coeff ++ ++Factor by which the weight of a weight-raised queue is multiplied. If ++the queue is deemed soft real-time, then the weight is further ++multiplied by an additional, constant factor. ++ ++wr_max_time ++ ++Maximum duration of a weight-raising period for an interactive task ++(ms). If set to zero (default value), then this value is computed ++automatically, as a function of the peak rate of the device. In any ++case, when the value of this parameter is read, it always reports the ++current duration, regardless of whether it has been set manually or ++computed automatically. ++ ++wr_max_softrt_rate ++ ++Maximum service rate below which a queue is deemed to be associated ++with a soft real-time application, and is then weight-raised ++accordingly (sectors/sec). ++ ++wr_min_idle_time ++ ++Minimum idle period after which interactive weight-raising may be ++reactivated for a queue (in ms). ++ ++wr_rt_max_time ++ ++Maximum weight-raising duration for soft real-time queues (in ms). The ++start time from which this duration is considered is automatically ++moved forward if the queue is detected to be still soft real-time ++before the current soft real-time weight-raising period finishes. ++ ++wr_min_inter_arr_async ++ ++Minimum period between I/O request arrivals after which weight-raising ++may be reactivated for an already busy async queue (in ms). ++ ++ ++4. Group scheduling with BFQ ++============================ ++ ++BFQ supports both cgroups-v1 and cgroups-v2 io controllers, namely ++blkio and io. In particular, BFQ supports weight-based proportional ++share. To activate cgroups support, set BFQ_GROUP_IOSCHED. ++ ++4-1 Service guarantees provided ++------------------------------- ++ ++With BFQ, proportional share means true proportional share of the ++device bandwidth, according to group weights. For example, a group ++with weight 200 gets twice the bandwidth, and not just twice the time, ++of a group with weight 100. ++ ++BFQ supports hierarchies (group trees) of any depth. Bandwidth is ++distributed among groups and processes in the expected way: for each ++group, the children of the group share the whole bandwidth of the ++group in proportion to their weights. In particular, this implies ++that, for each leaf group, every process of the group receives the ++same share of the whole group bandwidth, unless the ioprio of the ++process is modified. ++ ++The resource-sharing guarantee for a group may partially or totally ++switch from bandwidth to time, if providing bandwidth guarantees to ++the group lowers the throughput too much. This switch occurs on a ++per-process basis: if a process of a leaf group causes throughput loss ++if served in such a way to receive its share of the bandwidth, then ++BFQ switches back to just time-based proportional share for that ++process. 
++ ++4-2 Interface ++------------- ++ ++To get proportional sharing of bandwidth with BFQ for a given device, ++BFQ must of course be the active scheduler for that device. ++ ++Within each group directory, the names of the files associated with ++BFQ-specific cgroup parameters and stats begin with the "bfq." ++prefix. So, with cgroups-v1 or cgroups-v2, the full prefix for ++BFQ-specific files is "blkio.bfq." or "io.bfq." For example, the group ++parameter to set the weight of a group with BFQ is blkio.bfq.weight ++or io.bfq.weight. ++ ++Parameters to set ++----------------- ++ ++For each group, there is only the following parameter to set. ++ ++weight (namely blkio.bfq.weight or io.bfq-weight): the weight of the ++group inside its parent. Available values: 1..10000 (default 100). The ++linear mapping between ioprio and weights, described at the beginning ++of the tunable section, is still valid, but all weights higher than ++IOPRIO_BE_NR*10 are mapped to ioprio 0. ++ ++Recall that, if low-latency is set, then BFQ automatically raises the ++weight of the queues associated with interactive and soft real-time ++applications. Unset this tunable if you need/want to control weights. ++ ++ ++[1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O ++ Scheduler", Proceedings of the First Workshop on Mobile System ++ Technologies (MST-2015), May 2015. ++ http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf ++ ++[2] P. Valente and M. Andreolini, "Improving Application ++ Responsiveness with the BFQ Disk I/O Scheduler", Proceedings of ++ the 5th Annual International Systems and Storage Conference ++ (SYSTOR '12), June 2012. ++ Slightly extended version: ++ http://algogroup.unimore.it/people/paolo/disk_sched/bfq-v1-suite- ++ results.pdf diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched -index f78cd1a..6d92579 100644 +index f78cd1a..f2cd945 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched -@@ -53,7 +53,7 @@ config IOSCHED_BFQ +@@ -43,20 +43,20 @@ config IOSCHED_BFQ + tristate "BFQ I/O scheduler" + default n + ---help--- +- The BFQ I/O scheduler tries to distribute bandwidth among +- all processes according to their weights. +- It aims at distributing the bandwidth as desired, independently of +- the disk parameters and with any workload. It also tries to +- guarantee low latency to interactive and soft real-time +- applications. If compiled built-in (saying Y here), BFQ can +- be configured to support hierarchical scheduling. ++ The BFQ I/O scheduler distributes bandwidth among all ++ processes according to their weights, regardless of the ++ device parameters and with any workload. It also guarantees ++ a low latency to interactive and soft real-time applications. ++ Details in Documentation/block/bfq-iosched.txt config BFQ_GROUP_IOSCHED bool "BFQ hierarchical scheduling support" @@ -24,9 +590,15 @@ index f78cd1a..6d92579 100644 + depends on IOSCHED_BFQ && BLK_CGROUP default n ---help--- - Enable hierarchical scheduling in BFQ, using the blkio controller. +- Enable hierarchical scheduling in BFQ, using the blkio controller. ++ ++ Enable hierarchical scheduling in BFQ, using the blkio ++ (cgroups-v1) or io (cgroups-v2) controller. 
+ + choice + prompt "Default I/O scheduler" diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c -index 0367996..b50ae8e 100644 +index 0367996..39daaf4 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -7,7 +7,9 @@ @@ -49,15 +621,15 @@ index 0367996..b50ae8e 100644 return pd_to_bfqg(pd); } -@@ -208,59 +208,49 @@ static void bfqg_put(struct bfq_group *bfqg) +@@ -208,59 +208,47 @@ static void bfqg_put(struct bfq_group *bfqg) static void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq, - int rw) -+ int op, int op_flags) ++ unsigned int op) { - blkg_rwstat_add(&bfqg->stats.queued, rw, 1); -+ blkg_rwstat_add(&bfqg->stats.queued, op, op_flags, 1); ++ blkg_rwstat_add(&bfqg->stats.queued, op, 1); bfqg_stats_end_empty_time(&bfqg->stats); if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue)) bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq)); @@ -69,39 +641,37 @@ index 0367996..b50ae8e 100644 -} - -static void bfqg_stats_update_io_merged(struct bfq_group *bfqg, int rw) -+static void bfqg_stats_update_io_remove(struct bfq_group *bfqg, int op, -+ int op_flags) ++static void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { - blkg_rwstat_add(&bfqg->stats.merged, rw, 1); -+ blkg_rwstat_add(&bfqg->stats.queued, op, op_flags, -1); ++ blkg_rwstat_add(&bfqg->stats.queued, op, -1); } -static void bfqg_stats_update_dispatch(struct bfq_group *bfqg, - uint64_t bytes, int rw) -+static void bfqg_stats_update_io_merged(struct bfq_group *bfqg, int op, -+ int op_flags) ++static void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { - blkg_stat_add(&bfqg->stats.sectors, bytes >> 9); - blkg_rwstat_add(&bfqg->stats.serviced, rw, 1); - blkg_rwstat_add(&bfqg->stats.service_bytes, rw, bytes); -+ blkg_rwstat_add(&bfqg->stats.merged, op, op_flags, 1); ++ blkg_rwstat_add(&bfqg->stats.merged, op, 1); } static void bfqg_stats_update_completion(struct bfq_group *bfqg, - uint64_t start_time, uint64_t io_start_time, int rw) -+ uint64_t start_time, uint64_t io_start_time, int op, -+ int op_flags) ++ uint64_t start_time, uint64_t io_start_time, ++ unsigned int op) { struct bfqg_stats *stats = &bfqg->stats; unsigned long long now = sched_clock(); if (time_after64(now, io_start_time)) - blkg_rwstat_add(&stats->service_time, rw, now - io_start_time); -+ blkg_rwstat_add(&stats->service_time, op, op_flags, ++ blkg_rwstat_add(&stats->service_time, op, + now - io_start_time); if (time_after64(io_start_time, start_time)) - blkg_rwstat_add(&stats->wait_time, rw, -+ blkg_rwstat_add(&stats->wait_time, op, op_flags, ++ blkg_rwstat_add(&stats->wait_time, op, io_start_time - start_time); } @@ -122,7 +692,7 @@ index 0367996..b50ae8e 100644 blkg_stat_reset(&stats->avg_queue_size_sum); blkg_stat_reset(&stats->avg_queue_size_samples); blkg_stat_reset(&stats->dequeue); -@@ -270,19 +260,16 @@ static void bfqg_stats_reset(struct bfqg_stats *stats) +@@ -270,19 +258,16 @@ static void bfqg_stats_reset(struct bfqg_stats *stats) } /* @to += @from */ @@ -143,7 +713,7 @@ index 0367996..b50ae8e 100644 blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum); blkg_stat_add_aux(&to->avg_queue_size_samples, &from->avg_queue_size_samples); -@@ -311,10 +298,8 @@ static void bfqg_stats_xfer_dead(struct bfq_group *bfqg) +@@ -311,10 +296,8 @@ static void bfqg_stats_xfer_dead(struct bfq_group *bfqg) if (unlikely(!parent)) return; @@ -155,7 +725,14 @@ index 0367996..b50ae8e 100644 } static void bfq_init_entity(struct bfq_entity *entity, -@@ -335,15 +320,11 @@ 
static void bfq_init_entity(struct bfq_entity *entity, +@@ -329,21 +312,17 @@ static void bfq_init_entity(struct bfq_entity *entity, + bfqq->ioprio_class = bfqq->new_ioprio_class; + bfqg_get(bfqg); + } +- entity->parent = bfqg->my_entity; ++ entity->parent = bfqg->my_entity; /* NULL for root group */ + entity->sched_data = &bfqg->sched_data; + } static void bfqg_stats_exit(struct bfqg_stats *stats) { @@ -171,7 +748,7 @@ index 0367996..b50ae8e 100644 blkg_stat_exit(&stats->avg_queue_size_sum); blkg_stat_exit(&stats->avg_queue_size_samples); blkg_stat_exit(&stats->dequeue); -@@ -354,15 +335,11 @@ static void bfqg_stats_exit(struct bfqg_stats *stats) +@@ -354,15 +333,11 @@ static void bfqg_stats_exit(struct bfqg_stats *stats) static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp) { @@ -188,7 +765,7 @@ index 0367996..b50ae8e 100644 blkg_stat_init(&stats->avg_queue_size_sum, gfp) || blkg_stat_init(&stats->avg_queue_size_samples, gfp) || blkg_stat_init(&stats->dequeue, gfp) || -@@ -386,11 +363,27 @@ static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg) +@@ -386,11 +361,27 @@ static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg) return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq)); } @@ -196,7 +773,7 @@ index 0367996..b50ae8e 100644 +{ + struct bfq_group_data *bgd; + -+ bgd = kzalloc(sizeof(*bgd), GFP_KERNEL); ++ bgd = kzalloc(sizeof(*bgd), gfp); + if (!bgd) + return NULL; + return &bgd->pd; @@ -217,7 +794,7 @@ index 0367996..b50ae8e 100644 } static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node) -@@ -401,8 +394,7 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node) +@@ -401,8 +392,7 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node) if (!bfqg) return NULL; @@ -227,7 +804,7 @@ index 0367996..b50ae8e 100644 kfree(bfqg); return NULL; } -@@ -410,27 +402,20 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node) +@@ -410,27 +400,20 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node) return &bfqg->pd; } @@ -265,7 +842,7 @@ index 0367996..b50ae8e 100644 entity->orig_weight = entity->weight = entity->new_weight = d->weight; entity->my_sched_data = &bfqg->sched_data; -@@ -448,70 +433,53 @@ static void bfq_pd_free(struct blkg_policy_data *pd) +@@ -448,70 +431,53 @@ static void bfq_pd_free(struct blkg_policy_data *pd) struct bfq_group *bfqg = pd_to_bfqg(pd); bfqg_stats_exit(&bfqg->stats); @@ -363,7 +940,7 @@ index 0367996..b50ae8e 100644 /* * Update chain of bfq_groups as we might be handling a leaf group -@@ -537,11 +505,15 @@ static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd, +@@ -537,11 +503,15 @@ static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd, static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq); @@ -380,7 +957,7 @@ index 0367996..b50ae8e 100644 * @bfqg: the group to move to. * * Move @bfqq to @bfqg, deactivating it from its old group and reactivating -@@ -552,26 +524,40 @@ static void bfq_pos_tree_add_move(struct bfq_data *bfqd, +@@ -552,26 +522,40 @@ static void bfq_pos_tree_add_move(struct bfq_data *bfqd, * rcu_read_lock()). 
*/ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, @@ -401,9 +978,7 @@ index 0367996..b50ae8e 100644 + && entity->on_st && bfqq != bfqd->in_service_queue); + BUG_ON(!bfq_bfqq_busy(bfqq) && bfqq == bfqd->in_service_queue); - -- if (busy) { -- BUG_ON(atomic_read(&bfqq->ref) < 2); ++ + /* If bfqq is empty, then bfq_bfqq_expire also invokes + * bfq_del_bfqq_busy, thereby removing bfqq and its entity + * from data structures related to current group. Otherwise we @@ -417,7 +992,9 @@ index 0367996..b50ae8e 100644 + BUG_ON(entity->on_st && !bfq_bfqq_busy(bfqq) + && &bfq_entity_service_tree(entity)->idle != + entity->tree); -+ + +- if (busy) { +- BUG_ON(atomic_read(&bfqq->ref) < 2); + BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_busy(bfqq)); - if (!resume) @@ -426,7 +1003,7 @@ index 0367996..b50ae8e 100644 - bfq_deactivate_bfqq(bfqd, bfqq, 0); - } else if (entity->on_st) + if (bfq_bfqq_busy(bfqq)) -+ bfq_deactivate_bfqq(bfqd, bfqq, 0); ++ bfq_deactivate_bfqq(bfqd, bfqq, false, false); + else if (entity->on_st) { + BUG_ON(&bfq_entity_service_tree(entity)->idle != + entity->tree); @@ -435,7 +1012,7 @@ index 0367996..b50ae8e 100644 bfqg_put(bfqq_group(bfqq)); /* -@@ -583,14 +569,17 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -583,14 +567,17 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, entity->sched_data = &bfqg->sched_data; bfqg_get(bfqg); @@ -456,7 +1033,7 @@ index 0367996..b50ae8e 100644 } /** -@@ -617,7 +606,11 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, +@@ -617,7 +604,11 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, lockdep_assert_held(bfqd->queue->queue_lock); @@ -469,7 +1046,7 @@ index 0367996..b50ae8e 100644 if (async_bfqq) { entity = &async_bfqq->entity; -@@ -625,7 +618,8 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, +@@ -625,7 +616,8 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, bic_set_bfqq(bic, NULL, 0); bfq_log_bfqq(bfqd, async_bfqq, "bic_change_group: %p %d", @@ -479,7 +1056,7 @@ index 0367996..b50ae8e 100644 bfq_put_queue(async_bfqq); } } -@@ -633,7 +627,7 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, +@@ -633,7 +625,7 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, if (sync_bfqq) { entity = &sync_bfqq->entity; if (entity->sched_data != &bfqg->sched_data) @@ -488,7 +1065,7 @@ index 0367996..b50ae8e 100644 } return bfqg; -@@ -642,25 +636,23 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, +@@ -642,25 +634,23 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, static void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) { struct bfq_data *bfqd = bic_to_bfqd(bic); @@ -522,7 +1099,16 @@ index 0367996..b50ae8e 100644 } /** -@@ -686,7 +678,7 @@ static void bfq_reparent_leaf_entity(struct bfq_data *bfqd, +@@ -672,7 +662,7 @@ static void bfq_flush_idle_tree(struct bfq_service_tree *st) + struct bfq_entity *entity = st->first_idle; + + for (; entity ; entity = st->first_idle) +- __bfq_deactivate_entity(entity, 0); ++ __bfq_deactivate_entity(entity, false); + } + + /** +@@ -686,7 +676,7 @@ static void bfq_reparent_leaf_entity(struct bfq_data *bfqd, struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); BUG_ON(!bfqq); @@ -531,7 +1117,7 @@ index 0367996..b50ae8e 100644 } /** -@@ -717,11 +709,12 @@ static void bfq_reparent_active_entities(struct bfq_data *bfqd, +@@ -717,11 
+707,12 @@ static void bfq_reparent_active_entities(struct bfq_data *bfqd, } /** @@ -548,9 +1134,14 @@ index 0367996..b50ae8e 100644 */ static void bfq_pd_offline(struct blkg_policy_data *pd) { -@@ -780,6 +773,12 @@ static void bfq_pd_offline(struct blkg_policy_data *pd) +@@ -776,10 +767,15 @@ static void bfq_pd_offline(struct blkg_policy_data *pd) + BUG_ON(bfqg->sched_data.next_in_service); + BUG_ON(bfqg->sched_data.in_service_entity); + +- __bfq_deactivate_entity(entity, 0); ++ __bfq_deactivate_entity(entity, false); bfq_put_async_queues(bfqd, bfqg); - BUG_ON(entity->tree); +- BUG_ON(entity->tree); + /* + * @blkg is going offline and will be ignored by @@ -561,7 +1152,7 @@ index 0367996..b50ae8e 100644 bfqg_stats_xfer_dead(bfqg); } -@@ -789,46 +788,35 @@ static void bfq_end_wr_async(struct bfq_data *bfqd) +@@ -789,46 +785,35 @@ static void bfq_end_wr_async(struct bfq_data *bfqd) list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) { struct bfq_group *bfqg = blkg_to_bfqg(blkg); @@ -619,7 +1210,7 @@ index 0367996..b50ae8e 100644 if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT) return ret; -@@ -873,13 +861,18 @@ static int bfqio_cgroup_weight_write(struct cgroup_subsys_state *css, +@@ -873,13 +858,18 @@ static int bfqio_cgroup_weight_write(struct cgroup_subsys_state *css, return ret; } @@ -643,7 +1234,7 @@ index 0367996..b50ae8e 100644 } static int bfqg_print_stat(struct seq_file *sf, void *v) -@@ -899,16 +892,17 @@ static int bfqg_print_rwstat(struct seq_file *sf, void *v) +@@ -899,16 +889,17 @@ static int bfqg_print_rwstat(struct seq_file *sf, void *v) static u64 bfqg_prfill_stat_recursive(struct seq_file *sf, struct blkg_policy_data *pd, int off) { @@ -665,7 +1256,7 @@ index 0367996..b50ae8e 100644 return __blkg_prfill_rwstat(sf, pd, &sum); } -@@ -928,6 +922,41 @@ static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v) +@@ -928,6 +919,41 @@ static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v) return 0; } @@ -707,7 +1298,7 @@ index 0367996..b50ae8e 100644 static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf, struct blkg_policy_data *pd, int off) { -@@ -964,38 +993,15 @@ bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) +@@ -964,38 +990,15 @@ bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) return blkg_to_bfqg(bfqd->queue->root_blkg); } @@ -751,7 +1342,7 @@ index 0367996..b50ae8e 100644 { .name = "bfq.time", .private = offsetof(struct bfq_group, stats.time), -@@ -1003,18 +1009,17 @@ static struct cftype bfqio_files[] = { +@@ -1003,18 +1006,17 @@ static struct cftype bfqio_files[] = { }, { .name = "bfq.sectors", @@ -775,7 +1366,7 @@ index 0367996..b50ae8e 100644 }, { .name = "bfq.io_service_time", -@@ -1045,18 +1050,17 @@ static struct cftype bfqio_files[] = { +@@ -1045,18 +1047,17 @@ static struct cftype bfqio_files[] = { }, { .name = "bfq.sectors_recursive", @@ -799,7 +1390,7 @@ index 0367996..b50ae8e 100644 }, { .name = "bfq.io_service_time_recursive", -@@ -1102,31 +1106,39 @@ static struct cftype bfqio_files[] = { +@@ -1102,31 +1103,42 @@ static struct cftype bfqio_files[] = { .private = offsetof(struct bfq_group, stats.dequeue), .seq_show = bfqg_print_stat, }, @@ -839,14 +1430,14 @@ index 0367996..b50ae8e 100644 +#else /* CONFIG_BFQ_GROUP_IOSCHED */ + +static inline void bfqg_stats_update_io_add(struct bfq_group *bfqg, -+ struct bfq_queue *bfqq, int op, int op_flags) { } ++ struct bfq_queue *bfqq, unsigned int op) { } +static inline void -+bfqg_stats_update_io_remove(struct bfq_group *bfqg, int op, int op_flags) { } 
++bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { } +static inline void -+bfqg_stats_update_io_merged(struct bfq_group *bfqg, int op, int op_flags) { } ++bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { } +static inline void bfqg_stats_update_completion(struct bfq_group *bfqg, -+ uint64_t start_time, uint64_t io_start_time, int op, -+ int op_flags) { } ++ uint64_t start_time, uint64_t io_start_time, ++ unsigned int op) { } +static inline void +bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg, + struct bfq_group *curr_bfqg) { } @@ -856,20 +1447,32 @@ index 0367996..b50ae8e 100644 +static inline void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { } +static inline void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { } +static inline void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { } ++ ++static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, ++ struct bfq_group *bfqg) {} static void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg) -@@ -1150,27 +1162,20 @@ bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) - return bfqd->root_group; +@@ -1142,35 +1154,22 @@ static void bfq_init_entity(struct bfq_entity *entity, + entity->sched_data = &bfqg->sched_data; } +-static struct bfq_group * +-bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) +-{ +- struct bfq_data *bfqd = bic_to_bfqd(bic); +- +- return bfqd->root_group; +-} +- -static void bfq_bfqq_move(struct bfq_data *bfqd, - struct bfq_queue *bfqq, - struct bfq_entity *entity, - struct bfq_group *bfqg) -{ -} -- ++static void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {} + static void bfq_end_wr_async(struct bfq_data *bfqd) { bfq_end_wr_async_queues(bfqd, bfqd->root_group); @@ -893,17 +1496,24 @@ index 0367996..b50ae8e 100644 static struct bfq_group * diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c -index cf3e9b1..eef6ff4 100644 +index cf3e9b1..6d06c3c 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c -@@ -7,25 +7,28 @@ +@@ -1,5 +1,5 @@ + /* +- * Budget Fair Queueing (BFQ) disk scheduler. ++ * Budget Fair Queueing (BFQ) I/O scheduler. + * + * Based on ideas and code from CFQ: + * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk> +@@ -7,25 +7,34 @@ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it> * Paolo Valente <paolo.valente@unimore.it> * - * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it> + * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it> + * -+ * Copyright (C) 2016 Paolo Valente <paolo.valente@linaro.org> ++ * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org> * * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ * file. @@ -922,6 +1532,12 @@ index cf3e9b1..eef6ff4 100644 - * I/O-bound processes issuing sequential requests (to boost the - * throughput), and yet guarantee a low latency to interactive and soft - * real-time applications. ++ * BFQ is a proportional-share I/O scheduler, with some extra ++ * low-latency capabilities. BFQ also supports full hierarchical ++ * scheduling through cgroups. Next paragraphs provide an introduction ++ * on BFQ inner workings. Details on BFQ benefits and usage can be ++ * found in Documentation/block/bfq-iosched.txt. ++ * + * BFQ is a proportional-share storage-I/O scheduling algorithm based + * on the slice-by-slice service scheme of CFQ. 
But BFQ assigns + * budgets, measured in number of sectors, to processes instead of @@ -940,7 +1556,22 @@ index cf3e9b1..eef6ff4 100644 * * BFQ is described in [1], where also a reference to the initial, more * theoretical paper on BFQ can be found. The interested reader can find -@@ -70,8 +73,8 @@ +@@ -40,10 +49,10 @@ + * H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N) + * complexity derives from the one introduced with EEVDF in [3]. + * +- * [1] P. Valente and M. Andreolini, ``Improving Application Responsiveness +- * with the BFQ Disk I/O Scheduler'', +- * Proceedings of the 5th Annual International Systems and Storage +- * Conference (SYSTOR '12), June 2012. ++ * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O ++ * Scheduler", Proceedings of the First Workshop on Mobile System ++ * Technologies (MST-2015), May 2015. ++ * http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf + * + * http://algogroup.unimo.it/people/paolo/disk_sched/bf1-v1-suite-results.pdf + * +@@ -70,24 +79,23 @@ #include "bfq.h" #include "blk.h" @@ -950,34 +1581,37 @@ index cf3e9b1..eef6ff4 100644 +static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 }; /* Maximum backwards seek, in KiB. */ - static const int bfq_back_max = 16 * 1024; -@@ -79,15 +82,14 @@ static const int bfq_back_max = 16 * 1024; +-static const int bfq_back_max = 16 * 1024; ++static const int bfq_back_max = (16 * 1024); + /* Penalty of a backwards seek, in number of sectors. */ static const int bfq_back_penalty = 2; -/* Idling period duration, in jiffies. */ -static int bfq_slice_idle = HZ / 125; +/* Idling period duration, in ns. */ -+static u32 bfq_slice_idle = NSEC_PER_SEC / 125; ++static u32 bfq_slice_idle = (NSEC_PER_SEC / 125); /* Minimum number of assigned budgets for which stats are safe to compute. */ static const int bfq_stats_min_budgets = 194; /* Default maximum budget values, in sectors and number of requests. */ - static const int bfq_default_max_budget = 16 * 1024; +-static const int bfq_default_max_budget = 16 * 1024; -static const int bfq_max_budget_async_rq = 4; ++static const int bfq_default_max_budget = (16 * 1024); /* * Async to sync throughput distribution is controlled as follows: -@@ -97,23 +99,27 @@ static const int bfq_max_budget_async_rq = 4; +@@ -97,23 +105,28 @@ static const int bfq_max_budget_async_rq = 4; static const int bfq_async_charge_factor = 10; /* Default timeout values, in jiffies, approximating CFQ defaults. */ -static const int bfq_timeout_sync = HZ / 8; -static int bfq_timeout_async = HZ / 25; -+static const int bfq_timeout = HZ / 8; ++static const int bfq_timeout = (HZ / 8); - struct kmem_cache *bfq_pool; +-struct kmem_cache *bfq_pool; ++static struct kmem_cache *bfq_pool; -/* Below this threshold (in ms), we consider thinktime immediate. 
*/ -#define BFQ_MIN_TT 2 @@ -991,6 +1625,7 @@ index cf3e9b1..eef6ff4 100644 -#define BFQQ_SEEK_THR (sector_t)(8 * 1024) -#define BFQQ_SEEKY(bfqq) ((bfqq)->seek_mean > BFQQ_SEEK_THR) +#define BFQQ_SEEK_THR (sector_t)(8 * 100) ++#define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32) +#define BFQQ_CLOSE_THR (sector_t)(8 * 1024) +#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 32/8) @@ -999,13 +1634,13 @@ index cf3e9b1..eef6ff4 100644 +/* Min number of samples required to perform peak-rate update */ +#define BFQ_RATE_MIN_SAMPLES 32 +/* Min observation time interval required to perform a peak-rate update (ns) */ -+#define BFQ_RATE_MIN_INTERVAL 300*NSEC_PER_MSEC ++#define BFQ_RATE_MIN_INTERVAL (300*NSEC_PER_MSEC) +/* Target observation time interval for a peak-rate update (ns) */ +#define BFQ_RATE_REF_INTERVAL NSEC_PER_SEC /* Shift used for peak rate fixed precision calculations. */ #define BFQ_RATE_SHIFT 16 -@@ -141,16 +147,24 @@ struct kmem_cache *bfq_pool; +@@ -141,16 +154,24 @@ struct kmem_cache *bfq_pool; * The device's speed class is dynamically (re)detected in * bfq_update_peak_rate() every time the estimated peak rate is updated. * @@ -1037,19 +1672,26 @@ index cf3e9b1..eef6ff4 100644 /* * To improve readability, a conversion function is used to initialize the * following arrays, which entails that they can be initialized only in a -@@ -183,10 +197,7 @@ static void bfq_schedule_dispatch(struct bfq_data *bfqd); - */ - static int bfq_bio_sync(struct bio *bio) - { +@@ -178,18 +199,6 @@ static void bfq_schedule_dispatch(struct bfq_data *bfqd); + #define bfq_sample_valid(samples) ((samples) > 80) + + /* +- * We regard a request as SYNC, if either it's a read or has the SYNC bit +- * set (in which case it could also be a direct WRITE). +- */ +-static int bfq_bio_sync(struct bio *bio) +-{ - if (bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC)) - return 1; - - return 0; -+ return bio_data_dir(bio) == READ || (bio->bi_opf & REQ_SYNC); - } - - /* -@@ -409,11 +420,7 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd) +-} +- +-/* + * Scheduler run of queue, if there are requests pending and no one in the + * driver that will restart queueing. + */ +@@ -409,11 +418,7 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd) */ static bool bfq_symmetric_scenario(struct bfq_data *bfqd) { @@ -1062,7 +1704,77 @@ index cf3e9b1..eef6ff4 100644 } /* -@@ -533,9 +540,19 @@ static struct request *bfq_find_next_rq(struct bfq_data *bfqd, +@@ -469,6 +474,22 @@ static void bfq_weights_tree_add(struct bfq_data *bfqd, + + entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter), + GFP_ATOMIC); ++ ++ /* ++ * In the unlucky event of an allocation failure, we just ++ * exit. This will cause the weight of entity to not be ++ * considered in bfq_differentiated_weights, which, in its ++ * turn, causes the scenario to be deemed wrongly symmetric in ++ * case entity's weight would have been the only weight making ++ * the scenario asymmetric. On the bright side, no unbalance ++ * will however occur when entity becomes inactive again (the ++ * invocation of this function is triggered by an activation ++ * of entity). In fact, bfq_weights_tree_remove does nothing ++ * if !entity->weight_counter. 
++ */ ++ if (unlikely(!entity->weight_counter)) ++ return; ++ + entity->weight_counter->weight = entity->weight; + rb_link_node(&entity->weight_counter->weights_node, parent, new); + rb_insert_color(&entity->weight_counter->weights_node, root); +@@ -505,13 +526,45 @@ static void bfq_weights_tree_remove(struct bfq_data *bfqd, + entity->weight_counter = NULL; + } + ++/* ++ * Return expired entry, or NULL to just start from scratch in rbtree. ++ */ ++static struct request *bfq_check_fifo(struct bfq_queue *bfqq, ++ struct request *last) ++{ ++ struct request *rq; ++ ++ if (bfq_bfqq_fifo_expire(bfqq)) ++ return NULL; ++ ++ bfq_mark_bfqq_fifo_expire(bfqq); ++ ++ rq = rq_entry_fifo(bfqq->fifo.next); ++ ++ if (rq == last || ktime_get_ns() < rq->fifo_time) ++ return NULL; ++ ++ bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq); ++ BUG_ON(RB_EMPTY_NODE(&rq->rb_node)); ++ return rq; ++} ++ + static struct request *bfq_find_next_rq(struct bfq_data *bfqd, + struct bfq_queue *bfqq, + struct request *last) + { + struct rb_node *rbnext = rb_next(&last->rb_node); + struct rb_node *rbprev = rb_prev(&last->rb_node); +- struct request *next = NULL, *prev = NULL; ++ struct request *next, *prev = NULL; ++ ++ BUG_ON(list_empty(&bfqq->fifo)); ++ ++ /* Follow expired path, else get first next available. */ ++ next = bfq_check_fifo(bfqq, last); ++ if (next) { ++ BUG_ON(next == last); ++ return next; ++ } + + BUG_ON(RB_EMPTY_NODE(&last->rb_node)); + +@@ -533,9 +586,19 @@ static struct request *bfq_find_next_rq(struct bfq_data *bfqd, static unsigned long bfq_serv_to_charge(struct request *rq, struct bfq_queue *bfqq) { @@ -1085,7 +1797,16 @@ index cf3e9b1..eef6ff4 100644 } /** -@@ -590,12 +607,23 @@ static unsigned int bfq_wr_duration(struct bfq_data *bfqd) +@@ -576,7 +639,7 @@ static void bfq_updated_next_req(struct bfq_data *bfqd, + entity->budget = new_budget; + bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu", + new_budget); +- bfq_activate_bfqq(bfqd, bfqq); ++ bfq_requeue_bfqq(bfqd, bfqq); + } + } + +@@ -590,12 +653,23 @@ static unsigned int bfq_wr_duration(struct bfq_data *bfqd) dur = bfqd->RT_prod; do_div(dur, bfqd->peak_rate); @@ -1114,7 +1835,7 @@ index cf3e9b1..eef6ff4 100644 } static void -@@ -605,31 +633,28 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic) +@@ -605,31 +679,31 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic) bfq_mark_bfqq_idle_window(bfqq); else bfq_clear_bfqq_idle_window(bfqq); @@ -1142,13 +1863,16 @@ index cf3e9b1..eef6ff4 100644 + bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt; + BUG_ON(time_is_after_jiffies(bfqq->wr_start_at_switch_to_srt)); + bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish; ++ bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time; + BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); + + if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) || + time_is_before_jiffies(bfqq->last_wr_start_finish + + bfqq->wr_cur_max_time))) { + bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "resume state: switching off wr"); ++ "resume state: switching off wr (%lu + %lu < %lu)", ++ bfqq->last_wr_start_finish, bfqq->wr_cur_max_time, ++ jiffies); + + bfqq->wr_coeff = 1; } @@ -1163,7 +1887,7 @@ index cf3e9b1..eef6ff4 100644 } static int bfqq_process_refs(struct bfq_queue *bfqq) -@@ -639,7 +664,7 @@ static int bfqq_process_refs(struct bfq_queue *bfqq) +@@ -639,7 +713,7 @@ static int bfqq_process_refs(struct bfq_queue *bfqq) lockdep_assert_held(bfqq->bfqd->queue->queue_lock); io_refs = 
bfqq->allocated[READ] + bfqq->allocated[WRITE]; @@ -1172,7 +1896,7 @@ index cf3e9b1..eef6ff4 100644 BUG_ON(process_refs < 0); return process_refs; } -@@ -654,6 +679,7 @@ static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq) +@@ -654,6 +728,7 @@ static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq) hlist_del_init(&item->burst_list_node); hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list); bfqd->burst_size = 1; @@ -1180,7 +1904,7 @@ index cf3e9b1..eef6ff4 100644 } /* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */ -@@ -662,6 +688,10 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) +@@ -662,6 +737,10 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) /* Increment burst size to take into account also bfqq */ bfqd->burst_size++; @@ -1191,7 +1915,7 @@ index cf3e9b1..eef6ff4 100644 if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) { struct bfq_queue *pos, *bfqq_item; struct hlist_node *n; -@@ -671,15 +701,19 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) +@@ -671,15 +750,19 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) * other to consider this burst as large. */ bfqd->large_burst = true; @@ -1212,7 +1936,7 @@ index cf3e9b1..eef6ff4 100644 /* * From now on, and until the current burst finishes, any -@@ -691,67 +725,79 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) +@@ -691,67 +774,79 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) hlist_for_each_entry_safe(pos, n, &bfqd->burst_list, burst_list_node) hlist_del_init(&pos->burst_list_node); @@ -1326,8 +2050,7 @@ index cf3e9b1..eef6ff4 100644 + * enjoy weight raising as expected. Fortunately these false positives + * are very rare. They typically occur if some service happens to + * start doing I/O exactly when the interactive task starts. - * -- * . when the very first queue is activated, the queue is inserted into the ++ * + * Turning back to the next function, it implements all the steps + * needed to detect the occurrence of a large burst and to properly + * mark all the queues belonging to it (so that they can then be @@ -1336,12 +2059,13 @@ index cf3e9b1..eef6ff4 100644 + * burst in progress. The list is then used to mark these queues as + * belonging to a large burst if the burst does become large. The main + * steps are the following. -+ * + * +- * . when the very first queue is activated, the queue is inserted into the + * . when the very first queue is created, the queue is inserted into the * list (as it could be the first queue in a possible burst) * * . if the current burst has not yet become large, and a queue Q that does -@@ -772,13 +818,13 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) +@@ -772,13 +867,13 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) * * . 
the device enters a large-burst mode * @@ -1357,7 +2081,7 @@ index cf3e9b1..eef6ff4 100644 * later, i.e., not shortly after, than the last time at which a queue * either entered the burst list or was marked as belonging to the * current large burst, then the current burst is deemed as finished and: -@@ -791,52 +837,44 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) +@@ -791,52 +886,44 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) * in a possible new burst (then the burst list contains just Q * after this step). */ @@ -1434,7 +2158,7 @@ index cf3e9b1..eef6ff4 100644 } /* -@@ -845,8 +883,9 @@ static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -845,8 +932,9 @@ static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq, * bfqq as belonging to this large burst immediately. */ if (bfqd->large_burst) { @@ -1445,7 +2169,7 @@ index cf3e9b1..eef6ff4 100644 } /* -@@ -855,25 +894,491 @@ static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -855,25 +943,489 @@ static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq, * queue. Then we add bfqq to the burst. */ bfq_add_to_burst(bfqd, bfqq); @@ -1783,8 +2507,7 @@ index cf3e9b1..eef6ff4 100644 + BUG_ON(bfqq->entity.budget < bfqq->entity.service); + + BUG_ON(bfqq == bfqd->in_service_queue); -+ bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, -+ req_op(rq), rq->cmd_flags); ++ bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, rq->cmd_flags); + + /* + * bfqq deserves to be weight-raised if: @@ -1904,7 +2627,6 @@ index cf3e9b1..eef6ff4 100644 + + bfq_bfqq_expire(bfqd, bfqd->in_service_queue, + false, BFQ_BFQQ_PREEMPTED); -+ BUG_ON(in_serv->entity.budget < 0); + } } @@ -1941,7 +2663,7 @@ index cf3e9b1..eef6ff4 100644 */ prev = bfqq->next_rq; next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position); -@@ -886,160 +1391,10 @@ static void bfq_add_request(struct request *rq) +@@ -886,160 +1438,10 @@ static void bfq_add_request(struct request *rq) if (prev != bfqq->next_rq) bfq_pos_tree_add_move(bfqd, bfqq); @@ -2106,7 +2828,7 @@ index cf3e9b1..eef6ff4 100644 if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) && time_is_before_jiffies( bfqq->last_wr_start_finish + -@@ -1048,16 +1403,43 @@ add_bfqq_busy: +@@ -1048,16 +1450,43 @@ static void bfq_add_request(struct request *rq) bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); bfqd->wr_busy_queues++; @@ -2154,12 +2876,22 @@ index cf3e9b1..eef6ff4 100644 if (bfqd->low_latency && (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive)) bfqq->last_wr_start_finish = jiffies; -@@ -1081,14 +1463,24 @@ static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd, +@@ -1074,21 +1503,31 @@ static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd, + if (!bic) + return NULL; + +- bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio)); ++ bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf)); + if (bfqq) + return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio)); + return NULL; } +-static void bfq_activate_request(struct request_queue *q, struct request *rq) +static sector_t get_sdist(sector_t last_pos, struct request *rq) -+{ + { +- struct bfq_data *bfqd = q->elevator->elevator_data; + sector_t sdist = 0; + + if (last_pos) { @@ -2171,11 +2903,10 @@ index cf3e9b1..eef6ff4 100644 + + return sdist; +} -+ - static void bfq_activate_request(struct request_queue *q, struct request *rq) - { - struct bfq_data *bfqd = q->elevator->elevator_data; -- + ++static 
void bfq_activate_request(struct request_queue *q, struct request *rq) ++{ ++ struct bfq_data *bfqd = q->elevator->elevator_data; bfqd->rq_in_driver++; - bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); - bfq_log(bfqd, "activate_request: new bfqd->last_position %llu", @@ -2183,7 +2914,7 @@ index cf3e9b1..eef6ff4 100644 } static void bfq_deactivate_request(struct request_queue *q, struct request *rq) -@@ -1105,6 +1497,9 @@ static void bfq_remove_request(struct request *rq) +@@ -1105,6 +1544,9 @@ static void bfq_remove_request(struct request *rq) struct bfq_data *bfqd = bfqq->bfqd; const int sync = rq_is_sync(rq); @@ -2193,17 +2924,21 @@ index cf3e9b1..eef6ff4 100644 if (bfqq->next_rq == rq) { bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq); bfq_updated_next_req(bfqd, bfqq); -@@ -1118,8 +1513,25 @@ static void bfq_remove_request(struct request *rq) +@@ -1118,8 +1560,29 @@ static void bfq_remove_request(struct request *rq) elv_rb_del(&bfqq->sort_list, rq); if (RB_EMPTY_ROOT(&bfqq->sort_list)) { - if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) +- bfq_del_bfqq_busy(bfqd, bfqq, 1); ++ bfqq->next_rq = NULL; ++ + BUG_ON(bfqq->entity.budget < 0); + + if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) { - bfq_del_bfqq_busy(bfqd, bfqq, 1); -+ -+ /* bfqq emptied. In normal operation, when ++ BUG_ON(bfqq->ref < 2); /* referred by rq and on tree */ ++ bfq_del_bfqq_busy(bfqd, bfqq, false); ++ /* ++ * bfqq emptied. In normal operation, when + * bfqq is empty, bfqq->entity.service and + * bfqq->entity.budget must contain, + * respectively, the service received and the @@ -2212,7 +2947,8 @@ index cf3e9b1..eef6ff4 100644 + * this last removal occurred while bfqq is + * not in service. To avoid inconsistencies, + * reset both bfqq->entity.service and -+ * bfqq->entity.budget. ++ * bfqq->entity.budget, if bfqq has still a ++ * process that may issue I/O requests to it. + */ + bfqq->entity.budget = bfqq->entity.service = 0; + } @@ -2220,19 +2956,17 @@ index cf3e9b1..eef6ff4 100644 /* * Remove queue from request-position tree as it is empty. 
*/ -@@ -1133,9 +1545,8 @@ static void bfq_remove_request(struct request *rq) +@@ -1133,9 +1596,7 @@ static void bfq_remove_request(struct request *rq) BUG_ON(bfqq->meta_pending == 0); bfqq->meta_pending--; } -#ifdef CONFIG_BFQ_GROUP_IOSCHED -- bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags); + bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags); -#endif -+ bfqg_stats_update_io_remove(bfqq_group(bfqq), req_op(rq), -+ rq->cmd_flags); } static int bfq_merge(struct request_queue *q, struct request **req, -@@ -1145,7 +1556,7 @@ static int bfq_merge(struct request_queue *q, struct request **req, +@@ -1145,7 +1606,7 @@ static int bfq_merge(struct request_queue *q, struct request **req, struct request *__rq; __rq = bfq_find_rq_fmerge(bfqd, bio); @@ -2241,17 +2975,16 @@ index cf3e9b1..eef6ff4 100644 *req = __rq; return ELEVATOR_FRONT_MERGE; } -@@ -1190,7 +1601,8 @@ static void bfq_merged_request(struct request_queue *q, struct request *req, +@@ -1190,7 +1651,7 @@ static void bfq_merged_request(struct request_queue *q, struct request *req, static void bfq_bio_merged(struct request_queue *q, struct request *req, struct bio *bio) { - bfqg_stats_update_io_merged(bfqq_group(RQ_BFQQ(req)), bio->bi_rw); -+ bfqg_stats_update_io_merged(bfqq_group(RQ_BFQQ(req)), bio_op(bio), -+ bio->bi_opf); ++ bfqg_stats_update_io_merged(bfqq_group(RQ_BFQQ(req)), bio->bi_opf); } #endif -@@ -1210,7 +1622,7 @@ static void bfq_merged_requests(struct request_queue *q, struct request *rq, +@@ -1210,7 +1671,7 @@ static void bfq_merged_requests(struct request_queue *q, struct request *rq, */ if (bfqq == next_bfqq && !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && @@ -2260,15 +2993,13 @@ index cf3e9b1..eef6ff4 100644 list_del_init(&rq->queuelist); list_replace_init(&next->queuelist, &rq->queuelist); rq->fifo_time = next->fifo_time; -@@ -1220,21 +1632,31 @@ static void bfq_merged_requests(struct request_queue *q, struct request *rq, +@@ -1220,21 +1681,30 @@ static void bfq_merged_requests(struct request_queue *q, struct request *rq, bfqq->next_rq = rq; bfq_remove_request(next); -#ifdef CONFIG_BFQ_GROUP_IOSCHED -- bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags); + bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags); -#endif -+ bfqg_stats_update_io_merged(bfqq_group(bfqq), req_op(next), -+ next->cmd_flags); } /* Must be called with bfqq != NULL */ @@ -2296,7 +3027,7 @@ index cf3e9b1..eef6ff4 100644 } static void bfq_end_wr_async_queues(struct bfq_data *bfqd, -@@ -1277,7 +1699,7 @@ static int bfq_rq_close_to_sector(void *io_struct, bool request, +@@ -1277,7 +1747,7 @@ static int bfq_rq_close_to_sector(void *io_struct, bool request, sector_t sector) { return abs(bfq_io_struct_pos(io_struct, request) - sector) <= @@ -2305,7 +3036,7 @@ index cf3e9b1..eef6ff4 100644 } static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd, -@@ -1399,7 +1821,7 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) +@@ -1399,7 +1869,7 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) * throughput. */ bfqq->new_bfqq = new_bfqq; @@ -2314,7 +3045,7 @@ index cf3e9b1..eef6ff4 100644 return new_bfqq; } -@@ -1430,9 +1852,23 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq, +@@ -1430,9 +1900,23 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq, } /* @@ -2327,7 +3058,7 @@ index cf3e9b1..eef6ff4 100644 + * positives. In case bfqq is weight-raised, such false positives + * would evidently degrade latency guarantees for bfqq. 
+ */ -+bool wr_from_too_long(struct bfq_queue *bfqq) ++static bool wr_from_too_long(struct bfq_queue *bfqq) +{ + return bfqq->wr_coeff > 1 && + time_is_before_jiffies(bfqq->last_wr_start_finish + @@ -2341,7 +3072,7 @@ index cf3e9b1..eef6ff4 100644 * structure otherwise. * * The OOM queue is not allowed to participate to cooperation: in fact, since -@@ -1441,6 +1877,18 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq, +@@ -1441,6 +1925,18 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq, * handle merging with the OOM queue would be quite complex and expensive * to maintain. Besides, in such a critical condition as an out of memory, * the benefits of queue merging may be little relevant, or even negligible. @@ -2360,7 +3091,7 @@ index cf3e9b1..eef6ff4 100644 */ static struct bfq_queue * bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, -@@ -1450,16 +1898,32 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -1450,16 +1946,32 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, if (bfqq->new_bfqq) return bfqq->new_bfqq; @@ -2396,7 +3127,7 @@ index cf3e9b1..eef6ff4 100644 unlikely(in_service_bfqq == &bfqd->oom_bfqq)) goto check_scheduled; -@@ -1481,7 +1945,15 @@ check_scheduled: +@@ -1481,7 +1993,15 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, BUG_ON(new_bfqq && bfqq->entity.parent != new_bfqq->entity.parent); @@ -2413,7 +3144,7 @@ index cf3e9b1..eef6ff4 100644 bfq_may_be_close_cooperator(bfqq, new_bfqq)) return bfq_setup_merge(bfqq, new_bfqq); -@@ -1490,53 +1962,24 @@ check_scheduled: +@@ -1490,53 +2010,25 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, static void bfq_bfqq_save_state(struct bfq_queue *bfqq) { @@ -2475,11 +3206,12 @@ index cf3e9b1..eef6ff4 100644 + bic->saved_wr_coeff = bfqq->wr_coeff; + bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt; + bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish; ++ bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time; + BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); } static void bfq_get_bic_reference(struct bfq_queue *bfqq) -@@ -1561,6 +2004,40 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, +@@ -1561,6 +2053,41 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, if (bfq_bfqq_IO_bound(bfqq)) bfq_mark_bfqq_IO_bound(new_bfqq); bfq_clear_bfqq_IO_bound(bfqq); @@ -2497,7 +3229,8 @@ index cf3e9b1..eef6ff4 100644 + new_bfqq->wr_coeff = bfqq->wr_coeff; + new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time; + new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish; -+ new_bfqq->wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt; ++ new_bfqq->wr_start_at_switch_to_srt = ++ bfqq->wr_start_at_switch_to_srt; + if (bfq_bfqq_busy(new_bfqq)) + bfqd->wr_busy_queues++; + new_bfqq->entity.prio_changed = 1; @@ -2520,7 +3253,11 @@ index cf3e9b1..eef6ff4 100644 /* * Grab a reference to the bic, to prevent it from being destroyed * before being possibly touched by a bfq_split_bfqq(). 
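The weight-raising handoff added to bfq_merge_bfqqs() in the hunk above can be read in isolation: the surviving queue inherits the raising state of the queue being merged into it, the merged queue loses it, and the per-device count of busy weight-raised queues stays consistent. A minimal user-space sketch follows (illustration only, not part of the patch; the toy_* names and the bare wr_busy_queues counter stand in for the real bfq_queue/bfq_data fields):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the bfq_queue fields involved in the handoff. */
struct toy_bfqq {
	unsigned int wr_coeff;		/* > 1 means weight-raised */
	unsigned long wr_cur_max_time;
	unsigned long last_wr_start_finish;
	bool busy;
};

static int wr_busy_queues;		/* stand-in for bfqd->wr_busy_queues */

/*
 * Mirror of the transfer in bfq_merge_bfqqs(): new_bfqq inherits the
 * weight-raising state, bfqq gives it up, and the busy counter is
 * adjusted for each queue that is currently backlogged.
 */
static void toy_merge_wr_state(struct toy_bfqq *bfqq, struct toy_bfqq *new_bfqq)
{
	if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) {
		new_bfqq->wr_coeff = bfqq->wr_coeff;
		new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time;
		new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish;
		if (new_bfqq->busy)
			wr_busy_queues++;
	}
	if (bfqq->wr_coeff > 1) {	/* bfqq has given its wr to new_bfqq */
		bfqq->wr_coeff = 1;
		if (bfqq->busy)
			wr_busy_queues--;
	}
}

int main(void)
{
	struct toy_bfqq q  = { 30, 7500, 1000, true };
	struct toy_bfqq nq = { 1, 0, 0, true };

	wr_busy_queues = 1;	/* only q is raised and busy so far */
	toy_merge_wr_state(&q, &nq);
	printf("new wr_coeff=%u old wr_coeff=%u wr_busy_queues=%d\n",
	       nq.wr_coeff, q.wr_coeff, wr_busy_queues);	/* 30 1 1 */
	return 0;
}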
-@@ -1587,20 +2064,8 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, +@@ -1584,33 +2111,23 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, + */ + new_bfqq->bic = NULL; + bfqq->bic = NULL; ++ /* release process reference to bfqq */ bfq_put_queue(bfqq); } @@ -2542,26 +3279,33 @@ index cf3e9b1..eef6ff4 100644 + struct bio *bio) { struct bfq_data *bfqd = q->elevator->elevator_data; ++ bool is_sync = op_is_sync(bio->bi_opf); struct bfq_io_cq *bic; -@@ -1610,7 +2075,7 @@ static int bfq_allow_merge(struct request_queue *q, struct request *rq, + struct bfq_queue *bfqq, *new_bfqq; + + /* * Disallow merge of a sync bio into an async request. */ - if (bfq_bio_sync(bio) && !rq_is_sync(rq)) +- if (bfq_bio_sync(bio) && !rq_is_sync(rq)) - return 0; ++ if (is_sync && !rq_is_sync(rq)) + return false; /* * Lookup the bfqq that this bio will be queued with. Allow -@@ -1619,7 +2084,7 @@ static int bfq_allow_merge(struct request_queue *q, struct request *rq, +@@ -1619,9 +2136,9 @@ static int bfq_allow_merge(struct request_queue *q, struct request *rq, */ bic = bfq_bic_lookup(bfqd, current->io_context); if (!bic) - return 0; + return false; - bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio)); +- bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio)); ++ bfqq = bic_to_bfqq(bic, is_sync); /* -@@ -1636,30 +2101,107 @@ static int bfq_allow_merge(struct request_queue *q, struct request *rq, + * We take advantage of this function to perform an early merge + * of the queues of possible cooperating processes. +@@ -1636,30 +2153,111 @@ static int bfq_allow_merge(struct request_queue *q, struct request *rq, * to decide whether bio and rq can be merged. */ bfqq = new_bfqq; @@ -2648,9 +3392,13 @@ index cf3e9b1..eef6ff4 100644 + * not only expires, but also remains with no + * request. + */ -+ bfqq->last_wr_start_finish += jiffies - -+ max_t(unsigned long, bfqq->last_wr_start_finish, -+ bfqq->budget_timeout); ++ if (time_after(bfqq->budget_timeout, ++ bfqq->last_wr_start_finish)) ++ bfqq->last_wr_start_finish += ++ jiffies - bfqq->budget_timeout; ++ else ++ bfqq->last_wr_start_finish = jiffies; ++ + if (time_is_after_jiffies(bfqq->last_wr_start_finish)) { + pr_crit( + "BFQ WARNING:last %lu budget %lu jiffies %lu", @@ -2675,7 +3423,7 @@ index cf3e9b1..eef6ff4 100644 bfqd->in_service_queue = bfqq; } -@@ -1675,36 +2217,11 @@ static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd) +@@ -1675,36 +2273,11 @@ static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd) return bfqq; } @@ -2713,7 +3461,7 @@ index cf3e9b1..eef6ff4 100644 BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); -@@ -1728,59 +2245,343 @@ static void bfq_arm_slice_timer(struct bfq_data *bfqd) +@@ -1728,119 +2301,366 @@ static void bfq_arm_slice_timer(struct bfq_data *bfqd) sl = bfqd->bfq_slice_idle; /* * Unless the queue is being weight-raised or the scenario is @@ -2773,12 +3521,14 @@ index cf3e9b1..eef6ff4 100644 - if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time) - timeout_coeff = 1; +- else +- timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight; +/* + * Update parameters related to throughput and responsiveness, as a + * function of the estimated peak rate. See comments on + * bfq_calc_max_budget(), and on T_slow and T_fast arrays. 
+ */ -+void update_thr_responsiveness_params(struct bfq_data *bfqd) ++static void update_thr_responsiveness_params(struct bfq_data *bfqd) +{ + int dev_type = blk_queue_nonrot(bfqd->queue); + @@ -2789,7 +3539,8 @@ index cf3e9b1..eef6ff4 100644 + bfq_log(bfqd, "new max_budget = %d", + bfqd->bfq_max_budget); + } -+ + +- bfqd->last_budget_start = ktime_get(); + if (bfqd->device_speed == BFQ_BFQD_FAST && + bfqd->peak_rate < device_speed_thresh[dev_type]) { + bfqd->device_speed = BFQ_BFQD_SLOW; @@ -2801,7 +3552,10 @@ index cf3e9b1..eef6ff4 100644 + bfqd->RT_prod = R_fast[dev_type] * + T_fast[dev_type]; + } -+ + +- bfq_clear_bfqq_budget_new(bfqq); +- bfqq->budget_timeout = jiffies + +- bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] * timeout_coeff; + bfq_log(bfqd, +"dev_type %s dev_speed_class = %s (%llu sects/sec), thresh %llu setcs/sec", + dev_type == 0 ? "ROT" : "NONROT", @@ -2812,8 +3566,11 @@ index cf3e9b1..eef6ff4 100644 + (USEC_PER_SEC*(u64)device_speed_thresh[dev_type])>> + BFQ_RATE_SHIFT); +} -+ -+void bfq_reset_rate_computation(struct bfq_data *bfqd, struct request *rq) + +- bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u", +- jiffies_to_msecs(bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] * +- timeout_coeff)); ++static void bfq_reset_rate_computation(struct bfq_data *bfqd, struct request *rq) +{ + if (rq != NULL) { /* new rq dispatch now, reset accordingly */ + bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns() ; @@ -2828,20 +3585,37 @@ index cf3e9b1..eef6ff4 100644 + "reset_rate_computation at end, sample %u/%u tot_sects %llu", + bfqd->peak_rate_samples, bfqd->sequential_samples, + bfqd->tot_sectors_dispatched); -+} -+ -+void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) -+{ + } + +-/* +- * Move request from internal lists to the request queue dispatch list. +- */ +-static void bfq_dispatch_insert(struct request_queue *q, struct request *rq) ++static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) + { +- struct bfq_data *bfqd = q->elevator->elevator_data; +- struct bfq_queue *bfqq = RQ_BFQQ(rq); + u32 rate, weight, divisor; -+ -+ /* + + /* +- * For consistency, the next instruction should have been executed +- * after removing the request from the queue and dispatching it. +- * We execute instead this instruction before bfq_remove_request() +- * (and hence introduce a temporary inconsistency), for efficiency. +- * In fact, in a forced_dispatch, this prevents two counters related +- * to bfqq->dispatched to risk to be uselessly decremented if bfqq +- * is not in service, and then to be incremented again after +- * incrementing bfqq->dispatched. + * For the convergence property to hold (see comments on + * bfq_update_peak_rate()) and for the assessment to be + * reliable, a minimum number of samples must be present, and + * a minimum amount of time must have elapsed. If not so, do + * not compute new rate. Just reset parameters, to get ready + * for a new evaluation attempt. 
-+ */ + */ +- bfqq->dispatched++; +- bfq_remove_request(rq); +- elv_dispatch_sort(q, rq); + if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES || + bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) { + bfq_log(bfqd, @@ -2849,7 +3623,13 @@ index cf3e9b1..eef6ff4 100644 + bfqd->delta_from_first>>10, bfqd->peak_rate_samples); + goto reset_computation; + } -+ + +- if (bfq_bfqq_sync(bfqq)) +- bfqd->sync_flight++; +-#ifdef CONFIG_BFQ_GROUP_IOSCHED +- bfqg_stats_update_dispatch(bfqq_group(bfqq), blk_rq_bytes(rq), +- rq->cmd_flags); +-#endif + /* + * If a new request completion has occurred after last + * dispatch, then, to approximate the rate at which requests @@ -2880,7 +3660,7 @@ index cf3e9b1..eef6ff4 100644 + * total, and rate is below the current estimated peak rate + * - rate is unreasonably high (> 20M sectors/sec) + */ -+ if ((bfqd->peak_rate_samples > (3 * bfqd->sequential_samples)>>2 && ++ if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 && + rate <= bfqd->peak_rate) || + rate > 20<<BFQ_RATE_SHIFT) { + bfq_log(bfqd, @@ -2961,9 +3741,10 @@ index cf3e9b1..eef6ff4 100644 + +reset_computation: + bfq_reset_rate_computation(bfqd, rq); -+} -+ -+/* + } + + /* +- * Return expired entry, or NULL to just start from scratch in rbtree. + * Update the read/write peak rate (the main quantity used for + * auto-tuning, see update_thr_responsiveness_params()). + * @@ -2994,9 +3775,11 @@ index cf3e9b1..eef6ff4 100644 + * the next function to estimate the peak service rate as a function + * of the observed dispatch rate. The function assumes to be invoked + * on every request dispatch. -+ */ -+void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq) -+{ + */ +-static struct request *bfq_check_fifo(struct bfq_queue *bfqq) ++static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq) + { +- struct request *rq = NULL; + u64 now_ns = ktime_get_ns(); + + if (bfqd->peak_rate_samples == 0) { /* first dispatch */ @@ -3006,7 +3789,9 @@ index cf3e9b1..eef6ff4 100644 + bfq_reset_rate_computation(bfqd, rq); + goto update_last_values; /* will add one sample */ + } -+ + +- if (bfq_bfqq_fifo_expire(bfqq)) +- return NULL; + /* + * Device idle for very long: the observation interval lasting + * up to this dispatch cannot be a valid observation interval @@ -3027,40 +3812,39 @@ index cf3e9b1..eef6ff4 100644 + bfqd->peak_rate_samples) ; + goto update_rate_and_reset; + } -+ + +- bfq_mark_bfqq_fifo_expire(bfqq); + /* Update sampling information */ + bfqd->peak_rate_samples++; -+ + +- if (list_empty(&bfqq->fifo)) +- return NULL; + if ((bfqd->rq_in_driver > 0 || + now_ns - bfqd->last_completion < BFQ_MIN_TT) + && get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR) + bfqd->sequential_samples++; -+ + +- rq = rq_entry_fifo(bfqq->fifo.next); + bfqd->tot_sectors_dispatched += blk_rq_sectors(rq); -+ + +- if (time_before(jiffies, rq->fifo_time)) +- return NULL; + /* Reset max observed rq size every 32 dispatches */ + if (likely(bfqd->peak_rate_samples % 32)) + bfqd->last_rq_max_size = + max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size); - else -- timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight; ++ else + bfqd->last_rq_max_size = blk_rq_sectors(rq); -- bfqd->last_budget_start = ktime_get(); +- return rq; + bfqd->delta_from_first = now_ns - bfqd->first_dispatch; - -- bfq_clear_bfqq_budget_new(bfqq); -- bfqq->budget_timeout = jiffies + -- bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] * timeout_coeff; ++ + bfq_log(bfqd, + "update_peak_rate: added samples %u/%u tot_sects %llu 
delta_first %lluus", + bfqd->peak_rate_samples, bfqd->sequential_samples, + bfqd->tot_sectors_dispatched, + bfqd->delta_from_first>>10); - -- bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u", -- jiffies_to_msecs(bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] * -- timeout_coeff)); ++ + /* Target observation interval not yet reached, go on sampling */ + if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL) + goto update_last_values; @@ -3080,56 +3864,43 @@ index cf3e9b1..eef6ff4 100644 + "update_peak_rate: samples at end %d", bfqd->peak_rate_samples); } - /* -- * Move request from internal lists to the request queue dispatch list. +-static int bfq_bfqq_budget_left(struct bfq_queue *bfqq) ++/* + * Move request from internal lists to the dispatch list of the request queue - */ - static void bfq_dispatch_insert(struct request_queue *q, struct request *rq) ++ */ ++static void bfq_dispatch_insert(struct request_queue *q, struct request *rq) { -- struct bfq_data *bfqd = q->elevator->elevator_data; - struct bfq_queue *bfqq = RQ_BFQQ(rq); +- struct bfq_entity *entity = &bfqq->entity; ++ struct bfq_queue *bfqq = RQ_BFQQ(rq); - /* -@@ -1794,15 +2595,10 @@ static void bfq_dispatch_insert(struct request_queue *q, struct request *rq) - * incrementing bfqq->dispatched. - */ - bfqq->dispatched++; +- return entity->budget - entity->service; ++ /* ++ * For consistency, the next instruction should have been executed ++ * after removing the request from the queue and dispatching it. ++ * We execute instead this instruction before bfq_remove_request() ++ * (and hence introduce a temporary inconsistency), for efficiency. ++ * In fact, in a forced_dispatch, this prevents two counters related ++ * to bfqq->dispatched to risk to be uselessly decremented if bfqq ++ * is not in service, and then to be incremented again after ++ * incrementing bfqq->dispatched. ++ */ ++ bfqq->dispatched++; + bfq_update_peak_rate(q->elevator->elevator_data, rq); + - bfq_remove_request(rq); - elv_dispatch_sort(q, rq); -- -- if (bfq_bfqq_sync(bfqq)) -- bfqd->sync_flight++; --#ifdef CONFIG_BFQ_GROUP_IOSCHED -- bfqg_stats_update_dispatch(bfqq_group(bfqq), blk_rq_bytes(rq), -- rq->cmd_flags); --#endif ++ bfq_remove_request(rq); ++ elv_dispatch_sort(q, rq); } - /* -@@ -1822,19 +2618,12 @@ static struct request *bfq_check_fifo(struct bfq_queue *bfqq) - - rq = rq_entry_fifo(bfqq->fifo.next); + static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) + { + BUG_ON(bfqq != bfqd->in_service_queue); -- if (time_before(jiffies, rq->fifo_time)) -+ if (ktime_get_ns() < rq->fifo_time) - return NULL; - - return rq; - } - --static int bfq_bfqq_budget_left(struct bfq_queue *bfqq) --{ -- struct bfq_entity *entity = &bfqq->entity; -- -- return entity->budget - entity->service; --} +- __bfq_bfqd_reset_in_service(bfqd); - - static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) - { - BUG_ON(bfqq != bfqd->in_service_queue); -@@ -1851,12 +2640,15 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) + /* + * If this bfqq is shared between multiple processes, check + * to make sure that those processes are still issuing I/Os +@@ -1851,20 +2671,30 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfq_mark_bfqq_split_coop(bfqq); if (RB_EMPTY_ROOT(&bfqq->sort_list)) { @@ -3139,6 +3910,7 @@ index cf3e9b1..eef6ff4 100644 - * the weight-raising mechanism. 
- */ - bfqq->budget_timeout = jiffies; +- bfq_del_bfqq_busy(bfqd, bfqq, 1); + if (bfqq->dispatched == 0) + /* + * Overloading budget_timeout field to store @@ -3148,10 +3920,26 @@ index cf3e9b1..eef6ff4 100644 + */ + bfqq->budget_timeout = jiffies; + - bfq_del_bfqq_busy(bfqd, bfqq, 1); ++ bfq_del_bfqq_busy(bfqd, bfqq, true); } else { - bfq_activate_bfqq(bfqd, bfqq); -@@ -1883,10 +2675,19 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, +- bfq_activate_bfqq(bfqd, bfqq); ++ bfq_requeue_bfqq(bfqd, bfqq); + /* + * Resort priority tree of potential close cooperators. + */ + bfq_pos_tree_add_move(bfqd, bfqq); + } ++ ++ /* ++ * All in-service entities must have been properly deactivated ++ * or requeued before executing the next function, which ++ * resets all in-service entites as no more in service. ++ */ ++ __bfq_bfqd_reset_in_service(bfqd); + } + + /** +@@ -1883,10 +2713,19 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, struct request *next_rq; int budget, min_budget; @@ -3173,7 +3961,7 @@ index cf3e9b1..eef6ff4 100644 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d", bfqq->entity.budget, bfq_bfqq_budget_left(bfqq)); -@@ -1895,7 +2696,7 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, +@@ -1895,7 +2734,7 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d", bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue)); @@ -3182,7 +3970,7 @@ index cf3e9b1..eef6ff4 100644 switch (reason) { /* * Caveat: in all the following cases we trade latency -@@ -1937,14 +2738,10 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, +@@ -1937,14 +2776,10 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, break; case BFQ_BFQQ_BUDGET_TIMEOUT: /* @@ -3201,7 +3989,7 @@ index cf3e9b1..eef6ff4 100644 */ budget = min(budget * 2, bfqd->bfq_max_budget); break; -@@ -1961,17 +2758,49 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, +@@ -1961,17 +2796,49 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, budget = min(budget * 4, bfqd->bfq_max_budget); break; case BFQ_BFQQ_NO_MORE_REQUESTS: @@ -3258,7 +4046,7 @@ index cf3e9b1..eef6ff4 100644 */ budget = bfqd->bfq_max_budget; -@@ -1982,160 +2811,120 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, +@@ -1982,160 +2849,120 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget); /* @@ -3362,7 +4150,8 @@ index cf3e9b1..eef6ff4 100644 if (compensate) - delta = bfqd->last_idling_start; -- else ++ delta_ktime = bfqd->last_idling_start; + else - delta = ktime_get(); - delta = ktime_sub(delta, bfqd->last_budget_start); - usecs = ktime_to_us(delta); @@ -3379,13 +4168,35 @@ index cf3e9b1..eef6ff4 100644 - */ - bw = (u64)bfqq->entity.service << BFQ_RATE_SHIFT; - do_div(bw, (unsigned long)usecs); -- ++ delta_ktime = ktime_get(); ++ delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start); ++ delta_usecs = ktime_to_us(delta_ktime); ++ ++ /* don't use too short time intervals */ ++ if (delta_usecs < 1000) { ++ if (blk_queue_nonrot(bfqd->queue)) ++ /* ++ * give same worst-case guarantees as idling ++ * for seeky ++ */ ++ *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC; ++ else /* charge at least one seek */ ++ *delta_ms = bfq_slice_idle / NSEC_PER_MSEC; ++ ++ bfq_log(bfqd, "bfq_bfqq_is_slow: too short %u", delta_usecs); ++ ++ return slow; ++ } + - timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]); -- -- /* ++ *delta_ms = 
delta_usecs / USEC_PER_MSEC; + + /* - * Use only long (> 20ms) intervals to filter out spikes for - * the peak rate estimation. -- */ ++ * Use only long (> 20ms) intervals to filter out excessive ++ * spikes in service rate estimation. + */ - if (usecs > 20000) { - if (bw > bfqd->peak_rate || - (!BFQQ_SEEKY(bfqq) && @@ -3434,26 +4245,20 @@ index cf3e9b1..eef6ff4 100644 - T_fast[dev_type]; - } - } -+ delta_ktime = bfqd->last_idling_start; -+ else -+ delta_ktime = ktime_get(); -+ delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start); -+ delta_usecs = ktime_to_us(delta_ktime); -+ -+ /* don't trust short/unrealistic values. */ -+ if (delta_usecs < 1000 || delta_usecs >= LONG_MAX) { -+ if (blk_queue_nonrot(bfqd->queue)) -+ /* -+ * give same worst-case guarantees as idling -+ * for seeky -+ */ -+ *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC; -+ else /* charge at least one seek */ -+ *delta_ms = bfq_slice_idle / NSEC_PER_MSEC; -+ -+ bfq_log(bfqd, "bfq_bfqq_is_slow: unrealistic %u", delta_usecs); -+ -+ return slow; ++ if (delta_usecs > 20000) { ++ /* ++ * Caveat for rotational devices: processes doing I/O ++ * in the slower disk zones tend to be slow(er) even ++ * if not seeky. In this respect, the estimated peak ++ * rate is likely to be an average over the disk ++ * surface. Accordingly, to not be too harsh with ++ * unlucky processes, a process is deemed slow only if ++ * its rate has been lower than half of the estimated ++ * peak rate. ++ */ ++ slow = bfqq->entity.service < bfqd->bfq_max_budget / 2; ++ bfq_log(bfqd, "bfq_bfqq_is_slow: relative rate %d/%d", ++ bfqq->entity.service, bfqd->bfq_max_budget); } - /* @@ -3465,34 +4270,17 @@ index cf3e9b1..eef6ff4 100644 - */ - if (bfqq->entity.budget <= bfq_max_budget(bfqd) / 8) - return false; -+ *delta_ms = delta_usecs / USEC_PER_MSEC; - - /* +- +- /* - * A process is considered ``slow'' (i.e., seeky, so that we - * cannot treat it fairly in the service domain, as it would - * slow down too much the other processes) if, when a slice - * ends for whatever reason, it has received service at a - * rate that would not be high enough to complete the budget - * before the budget timeout expiration. -+ * Use only long (> 20ms) intervals to filter out excessive -+ * spikes in service rate estimation. - */ +- */ - expected = bw * 1000 * timeout >> BFQ_RATE_SHIFT; -+ if (delta_usecs > 20000) { -+ /* -+ * Caveat for rotational devices: processes doing I/O -+ * in the slower disk zones tend to be slow(er) even -+ * if not seeky. In this respect, the estimated peak -+ * rate is likely to be an average over the disk -+ * surface. Accordingly, to not be too harsh with -+ * unlucky processes, a process is deemed slow only if -+ * its rate has been lower than half of the estimated -+ * peak rate. -+ */ -+ slow = bfqq->entity.service < bfqd->bfq_max_budget / 2; -+ bfq_log(bfqd, "bfq_bfqq_is_slow: relative rate %d/%d", -+ bfqq->entity.service, bfqd->bfq_max_budget); -+ } ++ bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow); - /* - * Caveat: processes doing IO in the slower disk zones will @@ -3503,13 +4291,11 @@ index cf3e9b1..eef6ff4 100644 - * process slow. 
- */ - return expected > (4 * bfqq->entity.budget) / 3; -+ bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow); -+ + return slow; } /* -@@ -2193,20 +2982,35 @@ static bool bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -2193,20 +3020,35 @@ static bool bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq, static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, struct bfq_queue *bfqq) { @@ -3551,7 +4337,7 @@ index cf3e9b1..eef6ff4 100644 } /** -@@ -2216,28 +3020,24 @@ static unsigned long bfq_infinity_from_now(unsigned long now) +@@ -2216,28 +3058,24 @@ static unsigned long bfq_infinity_from_now(unsigned long now) * @compensate: if true, compensate for the time spent idling. * @reason: the reason causing the expiration. * @@ -3597,12 +4383,13 @@ index cf3e9b1..eef6ff4 100644 */ static void bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq, -@@ -2245,41 +3045,52 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, +@@ -2245,41 +3083,53 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, enum bfqq_expiration reason) { bool slow; + unsigned long delta = 0; + struct bfq_entity *entity = &bfqq->entity; ++ int ref; BUG_ON(bfqq != bfqd->in_service_queue); @@ -3673,7 +4460,7 @@ index cf3e9b1..eef6ff4 100644 bfq_clear_bfqq_IO_bound(bfqq); if (bfqd->low_latency && bfqq->wr_coeff == 1) -@@ -2288,19 +3099,23 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, +@@ -2288,19 +3138,23 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 && RB_EMPTY_ROOT(&bfqq->sort_list)) { /* @@ -3705,7 +4492,7 @@ index cf3e9b1..eef6ff4 100644 /* * The application is still waiting for the * completion of one or more requests: -@@ -2317,7 +3132,7 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, +@@ -2317,7 +3171,7 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, * happened to be in the past. */ bfqq->soft_rt_next_start = @@ -3714,7 +4501,7 @@ index cf3e9b1..eef6ff4 100644 /* * Schedule an update of soft_rt_next_start to when * the task may be discovered to be isochronous. 
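The rewritten bfq_bfqq_is_slow() above reduces to a simple rule: draw no verdict from intervals shorter than 1 ms (fall back to a fixed charge instead), and over intervals longer than 20 ms deem the queue slow only when it received less than half the maximum budget worth of service. A minimal sketch under those assumptions (illustration only; slow_default stands for the initial value the kernel computes before these checks, which is elided in this hunk):

#include <stdbool.h>
#include <stdio.h>

/*
 * Interval-based slowness test: too-short intervals are not judged,
 * and only intervals above 20 ms are long enough to filter out
 * spikes in the observed service rate.
 */
static bool toy_bfqq_is_slow(unsigned long delta_usecs, int service,
			     int max_budget, bool slow_default)
{
	bool slow = slow_default;

	if (delta_usecs < 1000)		/* too short to be meaningful */
		return slow;

	if (delta_usecs > 20000)	/* long enough to trust the rate */
		slow = service < max_budget / 2;

	return slow;
}

int main(void)
{
	printf("%d\n", toy_bfqq_is_slow(25000, 100, 400, false)); /* 1 */
	printf("%d\n", toy_bfqq_is_slow(25000, 300, 400, false)); /* 0 */
	printf("%d\n", toy_bfqq_is_slow(500, 0, 400, false));     /* 0 */
	return 0;
}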
-@@ -2327,15 +3142,27 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, +@@ -2327,15 +3181,30 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, } bfq_log_bfqq(bfqd, bfqq, @@ -3732,19 +4519,22 @@ index cf3e9b1..eef6ff4 100644 __bfq_bfqq_recalc_budget(bfqd, bfqq, reason); + BUG_ON(bfqq->next_rq == NULL && + bfqq->entity.budget < bfqq->entity.service); ++ ref = bfqq->ref; __bfq_bfqq_expire(bfqd, bfqq); + -+ BUG_ON(!bfq_bfqq_busy(bfqq) && reason == BFQ_BFQQ_BUDGET_EXHAUSTED && ++ BUG_ON(ref > 1 && ++ !bfq_bfqq_busy(bfqq) && reason == BFQ_BFQQ_BUDGET_EXHAUSTED && + !bfq_class_idle(bfqq)); + -+ if (!bfq_bfqq_busy(bfqq) && ++ /* mark bfqq as waiting a request only if a bic still points to it */ ++ if (ref > 1 && !bfq_bfqq_busy(bfqq) && + reason != BFQ_BFQQ_BUDGET_TIMEOUT && + reason != BFQ_BFQQ_BUDGET_EXHAUSTED) + bfq_mark_bfqq_non_blocking_wait_rq(bfqq); } /* -@@ -2345,20 +3172,17 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, +@@ -2345,20 +3214,17 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, */ static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq) { @@ -3773,7 +4563,7 @@ index cf3e9b1..eef6ff4 100644 static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq) { bfq_log_bfqq(bfqq->bfqd, bfqq, -@@ -2400,10 +3224,12 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) +@@ -2400,10 +3266,12 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) { struct bfq_data *bfqd = bfqq->bfqd; bool idling_boosts_thr, idling_boosts_thr_without_issues, @@ -3787,7 +4577,7 @@ index cf3e9b1..eef6ff4 100644 /* * The next variable takes into account the cases where idling * boosts the throughput. -@@ -2466,74 +3292,27 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) +@@ -2466,74 +3334,27 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) bfqd->wr_busy_queues == 0; /* @@ -3799,7 +4589,8 @@ index cf3e9b1..eef6ff4 100644 - * associated to the queue issues sequential/random requests - * (in the second case the queue may be tagged as seeky or - * even constantly_seeky). -- * ++ * guarantees. + * - * To introduce the first case, we note that, since - * bfq_bfqq_idle_window(bfqq) is false if the device is - * NCQ-capable and bfqq is random (see @@ -3812,8 +4603,7 @@ index cf3e9b1..eef6ff4 100644 - * it. And, beneficially, this would imply that throughput - * would always be boosted also with random I/O on NCQ-capable - * HDDs. -+ * guarantees. - * +- * - * But we must be careful on this point, to avoid an unfair - * treatment for bfqq. In fact, because of the same above - * assignments, idling_boosts_thr_without_issues is, on the @@ -3881,7 +4671,7 @@ index cf3e9b1..eef6ff4 100644 * (i) each of these processes must get the same throughput as * the others; * (ii) all these processes have the same I/O pattern -@@ -2555,26 +3334,53 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) +@@ -2555,26 +3376,53 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) * words, only if sub-condition (i) holds, then idling is * allowed, and the device tends to be prevented from queueing * many requests, possibly of several processes. The reason @@ -3955,7 +4745,7 @@ index cf3e9b1..eef6ff4 100644 * * According to the above considerations, the next variable is * true (only) if sub-condition (i) holds. 
To compute the -@@ -2582,7 +3388,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) +@@ -2582,7 +3430,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) * the function bfq_symmetric_scenario(), but also check * whether bfqq is being weight-raised, because * bfq_symmetric_scenario() does not take into account also @@ -3964,7 +4754,7 @@ index cf3e9b1..eef6ff4 100644 * bfq_weights_tree_add()). * * As a side note, it is worth considering that the above -@@ -2604,17 +3410,16 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) +@@ -2604,17 +3452,16 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) * bfqq. Such a case is when bfqq became active in a burst of * queue activations. Queues that became active during a large * burst benefit only from throughput, as discussed in the @@ -3987,7 +4777,7 @@ index cf3e9b1..eef6ff4 100644 /* * We have now all the components we need to compute the return -@@ -2624,6 +3429,16 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) +@@ -2624,6 +3471,16 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) * 2) idling either boosts the throughput (without issues), or * is necessary to preserve service guarantees. */ @@ -4004,7 +4794,7 @@ index cf3e9b1..eef6ff4 100644 return bfq_bfqq_sync(bfqq) && (idling_boosts_thr_without_issues || idling_needed_for_service_guarantees); -@@ -2635,7 +3450,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) +@@ -2635,7 +3492,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) * 1) the queue must remain in service and cannot be expired, and * 2) the device must be idled to wait for the possible arrival of a new * request for the queue. @@ -4013,7 +4803,7 @@ index cf3e9b1..eef6ff4 100644 * why performing device idling is the best choice to boost the throughput * and preserve service guarantees when bfq_bfqq_may_idle itself * returns true. -@@ -2665,7 +3480,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) +@@ -2665,18 +3522,33 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue"); if (bfq_may_expire_for_budg_timeout(bfqq) && @@ -4022,7 +4812,33 @@ index cf3e9b1..eef6ff4 100644 !bfq_bfqq_must_idle(bfqq)) goto expire; -@@ -2685,7 +3500,8 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) ++check_queue: ++ /* ++ * This loop is rarely executed more than once. Even when it ++ * happens, it is much more convenient to re-execute this loop ++ * than to return NULL and trigger a new dispatch to get a ++ * request served. ++ */ + next_rq = bfqq->next_rq; + /* + * If bfqq has requests queued and it has enough budget left to + * serve them, keep the queue, otherwise expire it. + */ + if (next_rq) { ++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list)); ++ + if (bfq_serv_to_charge(next_rq, bfqq) > + bfq_bfqq_budget_left(bfqq)) { ++ /* ++ * Expire the queue for budget exhaustion, ++ * which makes sure that the next budget is ++ * enough to serve the next request, even if ++ * it comes from the fifo expired path. ++ */ + reason = BFQ_BFQQ_BUDGET_EXHAUSTED; + goto expire; + } else { +@@ -2685,7 +3557,8 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) * not disable disk idling even when a new request * arrives. 
*/ @@ -4032,7 +4848,7 @@ index cf3e9b1..eef6ff4 100644 /* * If we get here: 1) at least a new request * has arrived but we have not disabled the -@@ -2700,10 +3516,8 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) +@@ -2700,10 +3573,8 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) * So we disable idling. */ bfq_clear_bfqq_wait_request(bfqq); @@ -4044,7 +4860,7 @@ index cf3e9b1..eef6ff4 100644 } goto keep_queue; } -@@ -2714,7 +3528,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) +@@ -2714,7 +3585,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) * for a new request, or has requests waiting for a completion and * may idle after their completion, then keep it anyway. */ @@ -4053,7 +4869,26 @@ index cf3e9b1..eef6ff4 100644 (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) { bfqq = NULL; goto keep_queue; -@@ -2736,6 +3550,9 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) +@@ -2725,9 +3596,16 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) + bfq_bfqq_expire(bfqd, bfqq, false, reason); + new_queue: + bfqq = bfq_set_in_service_queue(bfqd); +- bfq_log(bfqd, "select_queue: new queue %d returned", +- bfqq ? bfqq->pid : 0); ++ if (bfqq) { ++ bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue"); ++ goto check_queue; ++ } + keep_queue: ++ if (bfqq) ++ bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue"); ++ else ++ bfq_log(bfqd, "select_queue: no queue returned"); ++ + return bfqq; + } + +@@ -2736,6 +3614,9 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) struct bfq_entity *entity = &bfqq->entity; if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */ @@ -4063,7 +4898,7 @@ index cf3e9b1..eef6ff4 100644 bfq_log_bfqq(bfqd, bfqq, "raising period dur %u/%u msec, old coeff %u, w %d(%d)", jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish), -@@ -2749,22 +3566,30 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) +@@ -2749,22 +3630,30 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change"); /* @@ -4108,16 +4943,52 @@ index cf3e9b1..eef6ff4 100644 } } /* Update weight both if it must be raised and if it must be lowered */ -@@ -2815,13 +3640,29 @@ static int bfq_dispatch_request(struct bfq_data *bfqd, - */ - if (!bfqd->rq_in_driver) - bfq_schedule_dispatch(bfqd); -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); - goto expire; - } +@@ -2782,46 +3671,34 @@ static int bfq_dispatch_request(struct bfq_data *bfqd, + struct bfq_queue *bfqq) + { + int dispatched = 0; +- struct request *rq; ++ struct request *rq = bfqq->next_rq; + unsigned long service_to_charge; + BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list)); +- +- /* Follow expired path, else get first next available. */ +- rq = bfq_check_fifo(bfqq); +- if (!rq) +- rq = bfqq->next_rq; ++ BUG_ON(!rq); + service_to_charge = bfq_serv_to_charge(rq, bfqq); + +- if (service_to_charge > bfq_bfqq_budget_left(bfqq)) { +- /* +- * This may happen if the next rq is chosen in fifo order +- * instead of sector order. The budget is properly +- * dimensioned to be always sufficient to serve the next +- * request only if it is chosen in sector order. The reason +- * is that it would be quite inefficient and little useful +- * to always make sure that the budget is large enough to +- * serve even the possible next rq in fifo order. +- * In fact, requests are seldom served in fifo order. 
+- * +- * Expire the queue for budget exhaustion, and make sure +- * that the next act_budget is enough to serve the next +- * request, even if it comes from the fifo expired path. +- */ +- bfqq->next_rq = rq; +- /* +- * Since this dispatch is failed, make sure that +- * a new one will be performed +- */ +- if (!bfqd->rq_in_driver) +- bfq_schedule_dispatch(bfqd); +- goto expire; +- } ++ BUG_ON(service_to_charge > bfq_bfqq_budget_left(bfqq)); ++ + BUG_ON(bfqq->entity.budget < bfqq->entity.service); - /* Finally, insert request into driver dispatch list. */ + +- /* Finally, insert request into driver dispatch list. */ bfq_bfqq_served(bfqq, service_to_charge); + + BUG_ON(bfqq->entity.budget < bfqq->entity.service); @@ -4138,7 +5009,7 @@ index cf3e9b1..eef6ff4 100644 bfq_update_wr_data(bfqd, bfqq); bfq_log_bfqq(bfqd, bfqq, -@@ -2837,9 +3678,7 @@ static int bfq_dispatch_request(struct bfq_data *bfqd, +@@ -2837,9 +3714,7 @@ static int bfq_dispatch_request(struct bfq_data *bfqd, bfqd->in_service_bic = RQ_BIC(rq); } @@ -4149,7 +5020,7 @@ index cf3e9b1..eef6ff4 100644 goto expire; return dispatched; -@@ -2885,8 +3724,8 @@ static int bfq_forced_dispatch(struct bfq_data *bfqd) +@@ -2885,8 +3760,8 @@ static int bfq_forced_dispatch(struct bfq_data *bfqd) st = bfq_entity_service_tree(&bfqq->entity); dispatched += __bfq_forced_dispatch_bfqq(bfqq); @@ -4159,7 +5030,7 @@ index cf3e9b1..eef6ff4 100644 bfq_forget_idle(st); } -@@ -2899,37 +3738,37 @@ static int bfq_dispatch_requests(struct request_queue *q, int force) +@@ -2899,37 +3774,37 @@ static int bfq_dispatch_requests(struct request_queue *q, int force) { struct bfq_data *bfqd = q->elevator->elevator_data; struct bfq_queue *bfqq; @@ -4215,7 +5086,7 @@ index cf3e9b1..eef6ff4 100644 if (!bfq_dispatch_request(bfqd, bfqq)) return 0; -@@ -2937,6 +3776,8 @@ static int bfq_dispatch_requests(struct request_queue *q, int force) +@@ -2937,6 +3812,8 @@ static int bfq_dispatch_requests(struct request_queue *q, int force) bfq_log_bfqq(bfqd, bfqq, "dispatched %s request", bfq_bfqq_sync(bfqq) ? "sync" : "async"); @@ -4224,7 +5095,13 @@ index cf3e9b1..eef6ff4 100644 return 1; } -@@ -2948,23 +3789,22 @@ static int bfq_dispatch_requests(struct request_queue *q, int force) +@@ -2944,27 +3821,26 @@ static int bfq_dispatch_requests(struct request_queue *q, int force) + * Task holds one reference to the queue, dropped when task exits. Each rq + * in-flight on this queue also holds a reference, dropped when rq is freed. + * +- * Queue lock must be held here. ++ * Queue lock must be held here. Recall not to use bfqq after calling ++ * this function on it. 
*/ static void bfq_put_queue(struct bfq_queue *bfqq) { @@ -4249,11 +5126,10 @@ index cf3e9b1..eef6ff4 100644 BUG_ON(bfqq->entity.tree); BUG_ON(bfq_bfqq_busy(bfqq)); - BUG_ON(bfqd->in_service_queue == bfqq); -+ BUG_ON(bfqq->bfqd->in_service_queue == bfqq); if (bfq_bfqq_sync(bfqq)) /* -@@ -2977,7 +3817,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq) +@@ -2977,7 +3853,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq) */ hlist_del_init(&bfqq->burst_list_node); @@ -4262,7 +5138,7 @@ index cf3e9b1..eef6ff4 100644 kmem_cache_free(bfq_pool, bfqq); #ifdef CONFIG_BFQ_GROUP_IOSCHED -@@ -3011,8 +3851,7 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) +@@ -3011,38 +3887,16 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfq_schedule_dispatch(bfqd); } @@ -4272,7 +5148,9 @@ index cf3e9b1..eef6ff4 100644 bfq_put_cooperator(bfqq); -@@ -3021,28 +3860,7 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) +- bfq_put_queue(bfqq); ++ bfq_put_queue(bfqq); /* release process reference */ + } static void bfq_init_icq(struct io_cq *icq) { @@ -4302,7 +5180,7 @@ index cf3e9b1..eef6ff4 100644 } static void bfq_exit_icq(struct io_cq *icq) -@@ -3050,21 +3868,21 @@ static void bfq_exit_icq(struct io_cq *icq) +@@ -3050,21 +3904,21 @@ static void bfq_exit_icq(struct io_cq *icq) struct bfq_io_cq *bic = icq_to_bic(icq); struct bfq_data *bfqd = bic_to_bfqd(bic); @@ -4331,7 +5209,7 @@ index cf3e9b1..eef6ff4 100644 } } -@@ -3072,8 +3890,8 @@ static void bfq_exit_icq(struct io_cq *icq) +@@ -3072,8 +3926,8 @@ static void bfq_exit_icq(struct io_cq *icq) * Update the entity prio values; note that the new values will not * be used until the next (re)activation. */ @@ -4342,7 +5220,7 @@ index cf3e9b1..eef6ff4 100644 { struct task_struct *tsk = current; int ioprio_class; -@@ -3105,7 +3923,7 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) +@@ -3105,7 +3959,7 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) break; } @@ -4351,7 +5229,7 @@ index cf3e9b1..eef6ff4 100644 pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n", bfqq->new_ioprio); BUG(); -@@ -3113,45 +3931,40 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) +@@ -3113,45 +3967,41 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio); bfqq->entity.prio_changed = 1; @@ -4393,6 +5271,7 @@ index cf3e9b1..eef6ff4 100644 - bfqq, atomic_read(&bfqq->ref)); - bfq_put_queue(bfqq); - } ++ /* release process reference on this queue */ + bfq_put_queue(bfqq); + bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic); + bic_set_bfqq(bic, bfqq, false); @@ -4411,7 +5290,7 @@ index cf3e9b1..eef6ff4 100644 } static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, -@@ -3160,8 +3973,9 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -3160,8 +4010,9 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, RB_CLEAR_NODE(&bfqq->entity.rb_node); INIT_LIST_HEAD(&bfqq->fifo); INIT_HLIST_NODE(&bfqq->burst_list_node); @@ -4422,7 +5301,7 @@ index cf3e9b1..eef6ff4 100644 bfqq->bfqd = bfqd; if (bic) -@@ -3171,6 +3985,7 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -3171,6 +4022,7 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, if (!bfq_class_idle(bfqq)) bfq_mark_bfqq_idle_window(bfqq); bfq_mark_bfqq_sync(bfqq); @@ -4430,7 +5309,7 @@ index 
cf3e9b1..eef6ff4 100644 } else bfq_clear_bfqq_sync(bfqq); bfq_mark_bfqq_IO_bound(bfqq); -@@ -3180,72 +3995,19 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -3180,72 +4032,19 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, bfqq->pid = pid; bfqq->wr_coeff = 1; @@ -4487,7 +5366,8 @@ index cf3e9b1..eef6ff4 100644 - gfp_mask | __GFP_ZERO, - bfqd->queue->node); - } -- ++ bfqq->soft_rt_next_start = bfq_greatest_from_now(); + - if (bfqq) { - bfq_init_bfqq(bfqd, bfqq, bic, current->pid, - is_sync); @@ -4503,15 +5383,14 @@ index cf3e9b1..eef6ff4 100644 - kmem_cache_free(bfq_pool, new_bfqq); - - rcu_read_unlock(); -+ bfqq->soft_rt_next_start = bfq_greatest_from_now(); - +- - return bfqq; + /* first request is almost certainly seeky */ + bfqq->seek_history = 1; } static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd, -@@ -3268,90 +4030,84 @@ static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd, +@@ -3268,90 +4067,93 @@ static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd, } static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, @@ -4552,7 +5431,8 @@ index cf3e9b1..eef6ff4 100644 - if (!bfqq) - bfqq = bfq_find_alloc_queue(bfqd, bio, is_sync, bic, gfp_mask); -+ bfqq = kmem_cache_alloc_node(bfq_pool, GFP_NOWAIT | __GFP_ZERO, ++ bfqq = kmem_cache_alloc_node(bfq_pool, ++ GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN, + bfqd->queue->node); + + if (bfqq) { @@ -4573,7 +5453,13 @@ index cf3e9b1..eef6ff4 100644 - if (!is_sync && !(*async_bfqq)) { - atomic_inc(&bfqq->ref); + if (async_bfqq) { -+ bfqq->ref++; ++ bfqq->ref++; /* ++ * Extra group reference, w.r.t. sync ++ * queue. This extra reference is removed ++ * only if bfqq->bfqg disappears, to ++ * guarantee that this queue is not freed ++ * until its group goes away. 
++ */ bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d", - bfqq, atomic_read(&bfqq->ref)); + bfqq, bfqq->ref); @@ -4584,7 +5470,7 @@ index cf3e9b1..eef6ff4 100644 - bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, - atomic_read(&bfqq->ref)); +out: -+ bfqq->ref++; ++ bfqq->ref++; /* get a process reference to this queue */ + bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref); + rcu_read_unlock(); return bfqq; @@ -4597,13 +5483,13 @@ index cf3e9b1..eef6ff4 100644 - unsigned long ttime = min(elapsed, 2UL * bfqd->bfq_slice_idle); + struct bfq_ttime *ttime = &bic->ttime; + u64 elapsed = ktime_get_ns() - bic->ttime.last_end_request; -+ -+ elapsed = min_t(u64, elapsed, 2 * bfqd->bfq_slice_idle); - bic->ttime.ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8; - bic->ttime.ttime_total = (7*bic->ttime.ttime_total + 256*ttime) / 8; - bic->ttime.ttime_mean = (bic->ttime.ttime_total + 128) / - bic->ttime.ttime_samples; ++ elapsed = min_t(u64, elapsed, 2 * bfqd->bfq_slice_idle); ++ + ttime->ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8; + ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8); + ttime->ttime_mean = div64_ul(ttime->ttime_total + 128, @@ -4646,11 +5532,13 @@ index cf3e9b1..eef6ff4 100644 - (u64)bfqq->seek_mean); + bfqq->seek_history <<= 1; + bfqq->seek_history |= -+ get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR; ++ get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR && ++ (!blk_queue_nonrot(bfqd->queue) || ++ blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT); } /* -@@ -3369,7 +4125,8 @@ static void bfq_update_idle_window(struct bfq_data *bfqd, +@@ -3369,7 +4171,8 @@ static void bfq_update_idle_window(struct bfq_data *bfqd, return; /* Idle window just restored, statistics are meaningless. */ @@ -4660,7 +5548,7 @@ index cf3e9b1..eef6ff4 100644 return; enable_idle = bfq_bfqq_idle_window(bfqq); -@@ -3409,22 +4166,13 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -3409,22 +4212,13 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, bfq_update_io_thinktime(bfqd, bic); bfq_update_io_seektime(bfqd, bfqq, rq); @@ -4685,7 +5573,7 @@ index cf3e9b1..eef6ff4 100644 bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); -@@ -3438,14 +4186,15 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -3438,14 +4232,15 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, * is small and the queue is not to be expired, then * just exit. * @@ -4709,7 +5597,7 @@ index cf3e9b1..eef6ff4 100644 */ if (small_req && !budget_timeout) return; -@@ -3457,10 +4206,8 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -3457,10 +4252,8 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, * timer. 
*/ bfq_clear_bfqq_wait_request(bfqq); @@ -4721,17 +5609,22 @@ index cf3e9b1..eef6ff4 100644 /* * The queue is not empty, because a new request just -@@ -3504,28 +4251,21 @@ static void bfq_insert_request(struct request_queue *q, struct request *rq) +@@ -3504,28 +4297,24 @@ static void bfq_insert_request(struct request_queue *q, struct request *rq) */ new_bfqq->allocated[rq_data_dir(rq)]++; bfqq->allocated[rq_data_dir(rq)]--; - atomic_inc(&new_bfqq->ref); +- bfq_put_queue(bfqq); + new_bfqq->ref++; + bfq_clear_bfqq_just_created(bfqq); - bfq_put_queue(bfqq); if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq) bfq_merge_bfqqs(bfqd, RQ_BIC(rq), bfqq, new_bfqq); ++ /* ++ * rq is about to be enqueued into new_bfqq, ++ * release rq reference on bfqq ++ */ ++ bfq_put_queue(bfqq); rq->elv.priv[1] = new_bfqq; bfqq = new_bfqq; - } else @@ -4750,12 +5643,11 @@ index cf3e9b1..eef6ff4 100644 - if (bfqq->bic) - bfqq->bic->wr_time_left = 0; - rq->fifo_time = jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; -+ rq->fifo_time = ktime_get_ns() + -+ jiffies_to_nsecs(bfqd->bfq_fifo_expire[rq_is_sync(rq)]); ++ rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; list_add_tail(&rq->queuelist, &bfqq->fifo); bfq_rq_enqueued(bfqd, bfqq, rq); -@@ -3533,8 +4273,8 @@ static void bfq_insert_request(struct request_queue *q, struct request *rq) +@@ -3533,8 +4322,8 @@ static void bfq_insert_request(struct request_queue *q, struct request *rq) static void bfq_update_hw_tag(struct bfq_data *bfqd) { @@ -4766,7 +5658,7 @@ index cf3e9b1..eef6ff4 100644 if (bfqd->hw_tag == 1) return; -@@ -3560,48 +4300,85 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq) +@@ -3560,48 +4349,85 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq) { struct bfq_queue *bfqq = RQ_BFQQ(rq); struct bfq_data *bfqd = bfqq->bfqd; @@ -4791,7 +5683,7 @@ index cf3e9b1..eef6ff4 100644 rq_start_time_ns(rq), - rq_io_start_time_ns(rq), rq->cmd_flags); -#endif -+ rq_io_start_time_ns(rq), req_op(rq), ++ rq_io_start_time_ns(rq), + rq->cmd_flags); if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) { @@ -4876,7 +5768,7 @@ index cf3e9b1..eef6ff4 100644 */ if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 && RB_EMPTY_ROOT(&bfqq->sort_list)) -@@ -3613,10 +4390,7 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq) +@@ -3613,10 +4439,7 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq) * or if we want to idle in case it has no pending requests. 
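Note also that rq->fifo_time above becomes an absolute nanosecond deadline, ktime_get_ns() plus a bfq_fifo_expire[] value that is itself now kept in ns, rather than a jiffies count. A hedged sketch of how such a deadline is set and tested, with a POSIX clock standing in for ktime_get_ns() and toy_* names hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct toy_request {
	uint64_t fifo_time;	/* absolute expiry, in ns */
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void toy_enqueue(struct toy_request *rq, uint64_t fifo_expire_ns)
{
	rq->fifo_time = now_ns() + fifo_expire_ns;
}

static bool toy_fifo_expired(const struct toy_request *rq)
{
	return now_ns() > rq->fifo_time;
}

int main(void)
{
	struct toy_request rq;

	toy_enqueue(&rq, 125000000ull);	/* 125 ms, a plausible sync expiry */
	printf("expired right away? %s\n",
	       toy_fifo_expired(&rq) ? "yes" : "no");
	return 0;
}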
*/ if (bfqd->in_service_queue == bfqq) { @@ -4888,25 +5780,25 @@ index cf3e9b1..eef6ff4 100644 bfq_arm_slice_timer(bfqd); goto out; } else if (bfq_may_expire_for_budg_timeout(bfqq)) -@@ -3646,7 +4420,7 @@ static int __bfq_may_queue(struct bfq_queue *bfqq) +@@ -3646,7 +4469,7 @@ static int __bfq_may_queue(struct bfq_queue *bfqq) return ELV_MQUEUE_MAY; } -static int bfq_may_queue(struct request_queue *q, int rw) -+static int bfq_may_queue(struct request_queue *q, int op, int op_flags) ++static int bfq_may_queue(struct request_queue *q, unsigned int op) { struct bfq_data *bfqd = q->elevator->elevator_data; struct task_struct *tsk = current; -@@ -3663,7 +4437,7 @@ static int bfq_may_queue(struct request_queue *q, int rw) +@@ -3663,7 +4486,7 @@ static int bfq_may_queue(struct request_queue *q, int rw) if (!bic) return ELV_MQUEUE_MAY; - bfqq = bic_to_bfqq(bic, rw_is_sync(rw)); -+ bfqq = bic_to_bfqq(bic, rw_is_sync(op, op_flags)); ++ bfqq = bic_to_bfqq(bic, op_is_sync(op)); if (bfqq) return __bfq_may_queue(bfqq); -@@ -3687,14 +4461,14 @@ static void bfq_put_request(struct request *rq) +@@ -3687,14 +4510,14 @@ static void bfq_put_request(struct request *rq) rq->elv.priv[1] = NULL; bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d", @@ -4923,7 +5815,7 @@ index cf3e9b1..eef6ff4 100644 */ static struct bfq_queue * bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq) -@@ -3732,11 +4506,8 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, +@@ -3732,37 +4555,60 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, unsigned long flags; bool split = false; @@ -4936,7 +5828,11 @@ index cf3e9b1..eef6ff4 100644 if (!bic) goto queue_fail; -@@ -3746,23 +4517,47 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, + ++ bfq_check_ioprio_change(bic, bio); ++ + bfq_bic_update_cgroup(bic, bio); + new_queue: bfqq = bic_to_bfqq(bic, is_sync); if (!bfqq || bfqq == &bfqd->oom_bfqq) { @@ -4987,7 +5883,7 @@ index cf3e9b1..eef6ff4 100644 bfqq = bfq_split_bfqq(bic, bfqq); split = true; if (!bfqq) -@@ -3771,9 +4566,8 @@ new_queue: +@@ -3771,9 +4617,8 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, } bfqq->allocated[rw]++; @@ -4999,7 +5895,7 @@ index cf3e9b1..eef6ff4 100644 rq->elv.priv[0] = bic; rq->elv.priv[1] = bfqq; -@@ -3788,7 +4582,6 @@ new_queue: +@@ -3788,7 +4633,6 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) { bfqq->bic = bic; if (split) { @@ -5007,7 +5903,7 @@ index cf3e9b1..eef6ff4 100644 /* * If the queue has just been split from a shared * queue, restore the idle window and the possible -@@ -3798,6 +4591,9 @@ new_queue: +@@ -3798,6 +4642,9 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, } } @@ -5017,7 +5913,7 @@ index cf3e9b1..eef6ff4 100644 spin_unlock_irqrestore(q->queue_lock, flags); return 0; -@@ -3824,9 +4620,10 @@ static void bfq_kick_queue(struct work_struct *work) +@@ -3824,9 +4671,10 @@ static void bfq_kick_queue(struct work_struct *work) * Handler of the expiration of the timer running if the in-service queue * is idling inside its time slice. 
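bfq_may_queue() above now takes a single unsigned int op and classifies it with op_is_sync(), replacing the old (rw, flags) pair and rw_is_sync(). The semantics are: reads always count as synchronous, writes only when explicitly flagged. The sketch below mimics that with made-up TOY_* flag values; the real REQ_OP_* and REQ_* masks live in include/linux/blk_types.h and differ from these:

#include <stdbool.h>
#include <stdio.h>

#define TOY_OP_MASK	0xff
#define TOY_OP_READ	0
#define TOY_OP_WRITE	1
#define TOY_SYNC	(1u << 8)	/* write explicitly marked sync */

/* Reads are always treated as synchronous; writes only when flagged. */
static bool toy_op_is_sync(unsigned int op)
{
	return (op & TOY_OP_MASK) == TOY_OP_READ || (op & TOY_SYNC);
}

int main(void)
{
	printf("read: %d\n", toy_op_is_sync(TOY_OP_READ));
	printf("plain write: %d\n", toy_op_is_sync(TOY_OP_WRITE));
	printf("sync write: %d\n", toy_op_is_sync(TOY_OP_WRITE | TOY_SYNC));
	return 0;
}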
*/ @@ -5030,7 +5926,7 @@ index cf3e9b1..eef6ff4 100644 struct bfq_queue *bfqq; unsigned long flags; enum bfqq_expiration reason; -@@ -3844,6 +4641,8 @@ static void bfq_idle_slice_timer(unsigned long data) +@@ -3844,6 +4692,8 @@ static void bfq_idle_slice_timer(unsigned long data) */ if (bfqq) { bfq_log_bfqq(bfqd, bfqq, "slice_timer expired"); @@ -5039,7 +5935,7 @@ index cf3e9b1..eef6ff4 100644 if (bfq_bfqq_budget_timeout(bfqq)) /* * Also here the queue can be safely expired -@@ -3869,14 +4668,16 @@ schedule_dispatch: +@@ -3869,25 +4719,26 @@ static void bfq_idle_slice_timer(unsigned long data) bfq_schedule_dispatch(bfqd); spin_unlock_irqrestore(bfqd->queue->queue_lock, flags); @@ -5053,11 +5949,12 @@ index cf3e9b1..eef6ff4 100644 cancel_work_sync(&bfqd->unplug_work); } -+#ifdef CONFIG_BFQ_GROUP_IOSCHED static void __bfq_put_async_bfqq(struct bfq_data *bfqd, - struct bfq_queue **bfqq_ptr) +- struct bfq_queue **bfqq_ptr) ++ struct bfq_queue **bfqq_ptr) { -@@ -3885,9 +4686,9 @@ static void __bfq_put_async_bfqq(struct bfq_data *bfqd, + struct bfq_group *root_group = bfqd->root_group; + struct bfq_queue *bfqq = *bfqq_ptr; bfq_log(bfqd, "put_async_bfqq: %p", bfqq); if (bfqq) { @@ -5069,15 +5966,14 @@ index cf3e9b1..eef6ff4 100644 bfq_put_queue(bfqq); *bfqq_ptr = NULL; } -@@ -3909,6 +4710,7 @@ static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg) +@@ -3922,19 +4773,18 @@ static void bfq_exit_queue(struct elevator_queue *e) - __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq); - } -+#endif + BUG_ON(bfqd->in_service_queue); + list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) +- bfq_deactivate_bfqq(bfqd, bfqq, 0); ++ bfq_deactivate_bfqq(bfqd, bfqq, false, false); - static void bfq_exit_queue(struct elevator_queue *e) - { -@@ -3928,9 +4730,7 @@ static void bfq_exit_queue(struct elevator_queue *e) + spin_unlock_irq(q->queue_lock); bfq_shutdown_timer_wq(bfqd); @@ -5088,7 +5984,20 @@ index cf3e9b1..eef6ff4 100644 #ifdef CONFIG_BFQ_GROUP_IOSCHED blkcg_deactivate_policy(q, &blkcg_policy_bfq); -@@ -3978,11 +4778,14 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) + #else ++ bfq_put_async_queues(bfqd, bfqd->root_group); + kfree(bfqd->root_group); + #endif + +@@ -3954,6 +4804,7 @@ static void bfq_init_root_group(struct bfq_group *root_group, + root_group->rq_pos_tree = RB_ROOT; + for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) + root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; ++ root_group->sched_data.bfq_class_idle_last_service = jiffies; + } + + static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) +@@ -3978,11 +4829,14 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) * will not attempt to free it. */ bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0); @@ -5104,7 +6013,7 @@ index cf3e9b1..eef6ff4 100644 /* * Trigger weight initialization, according to ioprio, at the * oom_bfqq's first activation. 
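The idle-slice timer above changes from a classic timer, whose handler received an opaque unsigned long, to an hrtimer whose handler receives the timer pointer and recovers its enclosing bfq_data with container_of(). A minimal module-style sketch of that pattern (toy_data is a hypothetical structure, not the bfq one):

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/module.h>

struct toy_data {
	struct hrtimer idle_timer;
};

static struct toy_data toy;

static enum hrtimer_restart toy_idle_timer_fn(struct hrtimer *timer)
{
	struct toy_data *td = container_of(timer, struct toy_data,
					   idle_timer);

	pr_info("idle timer fired for %p\n", td);
	return HRTIMER_NORESTART;	/* one-shot, like the idle slice */
}

static int __init toy_init(void)
{
	hrtimer_init(&toy.idle_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	toy.idle_timer.function = toy_idle_timer_fn;
	/* arm for 8 ms from now, roughly an idling slice */
	hrtimer_start(&toy.idle_timer, ms_to_ktime(8), HRTIMER_MODE_REL);
	return 0;
}

static void __exit toy_exit(void)
{
	hrtimer_cancel(&toy.idle_timer);
}

module_init(toy_init);
module_exit(toy_exit);
MODULE_LICENSE("GPL");

The payoff for bfq is sub-jiffy resolution for the idling slice, which is why the fifo and slice parameters elsewhere in this patch move from jiffies to nanoseconds.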
The oom_bfqq's ioprio and ioprio -@@ -4001,13 +4804,10 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) +@@ -4001,13 +4855,10 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) goto out_free; bfq_init_root_group(bfqd->root_group, bfqd); bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group); @@ -5120,10 +6029,11 @@ index cf3e9b1..eef6ff4 100644 bfqd->queue_weights_tree = RB_ROOT; bfqd->group_weights_tree = RB_ROOT; -@@ -4028,20 +4828,19 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) +@@ -4027,21 +4878,19 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) + bfqd->bfq_back_max = bfq_back_max; bfqd->bfq_back_penalty = bfq_back_penalty; bfqd->bfq_slice_idle = bfq_slice_idle; - bfqd->bfq_class_idle_last_service = 0; +- bfqd->bfq_class_idle_last_service = 0; - bfqd->bfq_max_budget_async_rq = bfq_max_budget_async_rq; - bfqd->bfq_timeout[BLK_RW_ASYNC] = bfq_timeout_async; - bfqd->bfq_timeout[BLK_RW_SYNC] = bfq_timeout_sync; @@ -5148,7 +6058,7 @@ index cf3e9b1..eef6ff4 100644 bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300); bfqd->bfq_wr_max_time = 0; bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000); -@@ -4053,16 +4852,15 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) +@@ -4053,16 +4902,15 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) * video. */ bfqd->wr_busy_queues = 0; @@ -5169,7 +6079,7 @@ index cf3e9b1..eef6ff4 100644 bfqd->device_speed = BFQ_BFQD_FAST; return 0; -@@ -4088,7 +4886,7 @@ static int __init bfq_slab_setup(void) +@@ -4088,7 +4936,7 @@ static int __init bfq_slab_setup(void) static ssize_t bfq_var_show(unsigned int var, char *page) { @@ -5178,7 +6088,7 @@ index cf3e9b1..eef6ff4 100644 } static ssize_t bfq_var_store(unsigned long *var, const char *page, -@@ -4159,21 +4957,21 @@ static ssize_t bfq_weights_show(struct elevator_queue *e, char *page) +@@ -4159,21 +5007,21 @@ static ssize_t bfq_weights_show(struct elevator_queue *e, char *page) static ssize_t __FUNC(struct elevator_queue *e, char *page) \ { \ struct bfq_data *bfqd = e->elevator_data; \ @@ -5209,7 +6119,7 @@ index cf3e9b1..eef6ff4 100644 SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0); SHOW_FUNCTION(bfq_wr_coeff_show, bfqd->bfq_wr_coeff, 0); SHOW_FUNCTION(bfq_wr_rt_max_time_show, bfqd->bfq_wr_rt_max_time, 1); -@@ -4183,6 +4981,17 @@ SHOW_FUNCTION(bfq_wr_min_inter_arr_async_show, bfqd->bfq_wr_min_inter_arr_async, +@@ -4183,6 +5031,17 @@ SHOW_FUNCTION(bfq_wr_min_inter_arr_async_show, bfqd->bfq_wr_min_inter_arr_async, SHOW_FUNCTION(bfq_wr_max_softrt_rate_show, bfqd->bfq_wr_max_softrt_rate, 0); #undef SHOW_FUNCTION @@ -5227,7 +6137,7 @@ index cf3e9b1..eef6ff4 100644 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ static ssize_t \ __FUNC(struct elevator_queue *e, const char *page, size_t count) \ -@@ -4194,24 +5003,22 @@ __FUNC(struct elevator_queue *e, const char *page, size_t count) \ +@@ -4194,24 +5053,22 @@ __FUNC(struct elevator_queue *e, const char *page, size_t count) \ __data = (MIN); \ else if (__data > (MAX)) \ __data = (MAX); \ @@ -5258,7 +6168,7 @@ index cf3e9b1..eef6ff4 100644 STORE_FUNCTION(bfq_wr_coeff_store, &bfqd->bfq_wr_coeff, 1, INT_MAX, 0); STORE_FUNCTION(bfq_wr_max_time_store, &bfqd->bfq_wr_max_time, 0, INT_MAX, 1); STORE_FUNCTION(bfq_wr_rt_max_time_store, &bfqd->bfq_wr_rt_max_time, 0, INT_MAX, -@@ -4224,6 +5031,23 @@ STORE_FUNCTION(bfq_wr_max_softrt_rate_store, &bfqd->bfq_wr_max_softrt_rate, 0, +@@ 
-4224,6 +5081,23 @@ STORE_FUNCTION(bfq_wr_max_softrt_rate_store, &bfqd->bfq_wr_max_softrt_rate, 0, INT_MAX, 0); #undef STORE_FUNCTION @@ -5266,7 +6176,7 @@ index cf3e9b1..eef6ff4 100644 +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\ +{ \ + struct bfq_data *bfqd = e->elevator_data; \ -+ unsigned long __data; \ ++ unsigned long uninitialized_var(__data); \ + int ret = bfq_var_store(&__data, (page), count); \ + if (__data < (MIN)) \ + __data = (MIN); \ @@ -5282,7 +6192,7 @@ index cf3e9b1..eef6ff4 100644 /* do nothing for the moment */ static ssize_t bfq_weights_store(struct elevator_queue *e, const char *page, size_t count) -@@ -4231,16 +5055,6 @@ static ssize_t bfq_weights_store(struct elevator_queue *e, +@@ -4231,16 +5105,6 @@ static ssize_t bfq_weights_store(struct elevator_queue *e, return count; } @@ -5299,7 +6209,7 @@ index cf3e9b1..eef6ff4 100644 static ssize_t bfq_max_budget_store(struct elevator_queue *e, const char *page, size_t count) { -@@ -4249,7 +5063,7 @@ static ssize_t bfq_max_budget_store(struct elevator_queue *e, +@@ -4249,7 +5113,7 @@ static ssize_t bfq_max_budget_store(struct elevator_queue *e, int ret = bfq_var_store(&__data, (page), count); if (__data == 0) @@ -5308,7 +6218,7 @@ index cf3e9b1..eef6ff4 100644 else { if (__data > INT_MAX) __data = INT_MAX; -@@ -4261,6 +5075,10 @@ static ssize_t bfq_max_budget_store(struct elevator_queue *e, +@@ -4261,6 +5125,10 @@ static ssize_t bfq_max_budget_store(struct elevator_queue *e, return ret; } @@ -5319,7 +6229,7 @@ index cf3e9b1..eef6ff4 100644 static ssize_t bfq_timeout_sync_store(struct elevator_queue *e, const char *page, size_t count) { -@@ -4273,9 +5091,27 @@ static ssize_t bfq_timeout_sync_store(struct elevator_queue *e, +@@ -4273,9 +5141,27 @@ static ssize_t bfq_timeout_sync_store(struct elevator_queue *e, else if (__data > INT_MAX) __data = INT_MAX; @@ -5349,7 +6259,7 @@ index cf3e9b1..eef6ff4 100644 return ret; } -@@ -4305,10 +5141,10 @@ static struct elv_fs_entry bfq_attrs[] = { +@@ -4305,10 +5191,10 @@ static struct elv_fs_entry bfq_attrs[] = { BFQ_ATTR(back_seek_max), BFQ_ATTR(back_seek_penalty), BFQ_ATTR(slice_idle), @@ -5362,7 +6272,7 @@ index cf3e9b1..eef6ff4 100644 BFQ_ATTR(low_latency), BFQ_ATTR(wr_coeff), BFQ_ATTR(wr_max_time), -@@ -4328,7 +5164,8 @@ static struct elevator_type iosched_bfq = { +@@ -4328,7 +5214,8 @@ static struct elevator_type iosched_bfq = { #ifdef CONFIG_BFQ_GROUP_IOSCHED .elevator_bio_merged_fn = bfq_bio_merged, #endif @@ -5372,7 +6282,7 @@ index cf3e9b1..eef6ff4 100644 .elevator_dispatch_fn = bfq_dispatch_requests, .elevator_add_req_fn = bfq_insert_request, .elevator_activate_req_fn = bfq_activate_request, -@@ -4351,18 +5188,28 @@ static struct elevator_type iosched_bfq = { +@@ -4351,18 +5238,28 @@ static struct elevator_type iosched_bfq = { .elevator_owner = THIS_MODULE, }; @@ -5406,11 +6316,11 @@ index cf3e9b1..eef6ff4 100644 - - if (bfq_timeout_async == 0) - bfq_timeout_async = 1; -+ char msg[50] = "BFQ I/O-scheduler: v8r4"; ++ char msg[60] = "BFQ I/O-scheduler: v8r11"; #ifdef CONFIG_BFQ_GROUP_IOSCHED ret = blkcg_policy_register(&blkcg_policy_bfq); -@@ -4375,27 +5222,46 @@ static int __init bfq_init(void) +@@ -4375,27 +5272,46 @@ static int __init bfq_init(void) goto err_pol_unreg; /* @@ -5434,7 +6344,7 @@ index cf3e9b1..eef6ff4 100644 - T_fast[0] = msecs_to_jiffies(5500); - T_fast[1] = msecs_to_jiffies(2000); + T_slow[0] = msecs_to_jiffies(3500); /* actually 4 sec */ -+ T_slow[1] = msecs_to_jiffies(1000); /* actually 1.5 sec */ ++ T_slow[1] = 
msecs_to_jiffies(6000); /* actually 6.5 sec */ + T_fast[0] = msecs_to_jiffies(7000); /* actually 8 sec */ + T_fast[1] = msecs_to_jiffies(2500); /* actually 3 sec */ @@ -5470,10 +6380,10 @@ index cf3e9b1..eef6ff4 100644 return 0; diff --git a/block/bfq-sched.c b/block/bfq-sched.c -index a5ed694..45d63d3 100644 +index a5ed694..8311bdb 100644 --- a/block/bfq-sched.c +++ b/block/bfq-sched.c -@@ -7,9 +7,13 @@ +@@ -7,28 +7,172 @@ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it> * Paolo Valente <paolo.valente@unimore.it> * @@ -5483,73 +6393,314 @@ index a5ed694..45d63d3 100644 + * Copyright (C) 2016 Paolo Valente <paolo.valente@linaro.org> */ +-#ifdef CONFIG_BFQ_GROUP_IOSCHED +-#define for_each_entity(entity) \ +- for (; entity ; entity = entity->parent) +static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); -+ - #ifdef CONFIG_BFQ_GROUP_IOSCHED - #define for_each_entity(entity) \ - for (; entity ; entity = entity->parent) -@@ -22,8 +26,6 @@ static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, - int extract, - struct bfq_data *bfqd); --static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); -- - static void bfq_update_budget(struct bfq_entity *next_in_service) - { - struct bfq_entity *bfqg_entity; -@@ -48,6 +50,7 @@ static void bfq_update_budget(struct bfq_entity *next_in_service) - static int bfq_update_next_in_service(struct bfq_sched_data *sd) - { - struct bfq_entity *next_in_service; -+ struct bfq_queue *bfqq; +-#define for_each_entity_safe(entity, parent) \ +- for (; entity && ({ parent = entity->parent; 1; }); entity = parent) ++/** ++ * bfq_gt - compare two timestamps. ++ * @a: first ts. ++ * @b: second ts. ++ * ++ * Return @a > @b, dealing with wrapping correctly. ++ */ ++static int bfq_gt(u64 a, u64 b) ++{ ++ return (s64)(a - b) > 0; ++} + ++static struct bfq_entity *bfq_root_active_entity(struct rb_root *tree) ++{ ++ struct rb_node *node = tree->rb_node; - if (sd->in_service_entity) - /* will update/requeue at the end of service */ -@@ -65,14 +68,29 @@ static int bfq_update_next_in_service(struct bfq_sched_data *sd) +-static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, +- int extract, +- struct bfq_data *bfqd); ++ return rb_entry(node, struct bfq_entity, rb_node); ++} - if (next_in_service) - bfq_update_budget(next_in_service); -+ else -+ goto exit; +-static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); ++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd); ++ ++static bool bfq_update_parent_budget(struct bfq_entity *next_in_service); ++ ++/** ++ * bfq_update_next_in_service - update sd->next_in_service ++ * @sd: sched_data for which to perform the update. ++ * @new_entity: if not NULL, pointer to the entity whose activation, ++ * requeueing or repositionig triggered the invocation of ++ * this function. ++ * ++ * This function is called to update sd->next_in_service, which, in ++ * its turn, may change as a consequence of the insertion or ++ * extraction of an entity into/from one of the active trees of ++ * sd. These insertions/extractions occur as a consequence of ++ * activations/deactivations of entities, with some activations being ++ * 'true' activations, and other activations being requeueings (i.e., ++ * implementing the second, requeueing phase of the mechanism used to ++ * reposition an entity in its active tree; see comments on ++ * __bfq_activate_entity and __bfq_requeue_entity for details). 
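bfq_gt(), moved to the top of bfq-sched.c above, compares two u64 virtual timestamps through a signed difference, so the answer stays correct across wraparound as long as the two operands are less than 2^63 apart. A standalone demonstration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same comparison as bfq_gt(): "a > b" modulo u64 wraparound. */
static int bfq_gt(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) > 0;
}

int main(void)
{
	uint64_t before_wrap = UINT64_MAX - 5;	/* close to the wrap point */
	uint64_t after_wrap = 10;		/* logically later */

	/* a naive comparison gets this wrong... */
	printf("naive: %d\n", after_wrap > before_wrap);	/* 0 */
	/* ...the signed-difference trick does not */
	printf("bfq_gt: %d\n", bfq_gt(after_wrap, before_wrap));	/* 1 */
	return 0;
}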
In ++ * both the last two activation sub-cases, new_entity points to the ++ * just activated or requeued entity. ++ * ++ * Returns true if sd->next_in_service changes in such a way that ++ * entity->parent may become the next_in_service for its parent ++ * entity. ++ */ ++static bool bfq_update_next_in_service(struct bfq_sched_data *sd, ++ struct bfq_entity *new_entity) ++{ ++ struct bfq_entity *next_in_service = sd->next_in_service; ++ struct bfq_queue *bfqq; ++ bool parent_sched_may_change = false; ++ ++ /* ++ * If this update is triggered by the activation, requeueing ++ * or repositiong of an entity that does not coincide with ++ * sd->next_in_service, then a full lookup in the active tree ++ * can be avoided. In fact, it is enough to check whether the ++ * just-modified entity has a higher priority than ++ * sd->next_in_service, or, even if it has the same priority ++ * as sd->next_in_service, is eligible and has a lower virtual ++ * finish time than sd->next_in_service. If this compound ++ * condition holds, then the new entity becomes the new ++ * next_in_service. Otherwise no change is needed. ++ */ ++ if (new_entity && new_entity != sd->next_in_service) { ++ /* ++ * Flag used to decide whether to replace ++ * sd->next_in_service with new_entity. Tentatively ++ * set to true, and left as true if ++ * sd->next_in_service is NULL. ++ */ ++ bool replace_next = true; ++ ++ /* ++ * If there is already a next_in_service candidate ++ * entity, then compare class priorities or timestamps ++ * to decide whether to replace sd->service_tree with ++ * new_entity. ++ */ ++ if (next_in_service) { ++ unsigned int new_entity_class_idx = ++ bfq_class_idx(new_entity); ++ struct bfq_service_tree *st = ++ sd->service_tree + new_entity_class_idx; ++ ++ /* ++ * For efficiency, evaluate the most likely ++ * sub-condition first. 
++ */ ++ replace_next = ++ (new_entity_class_idx == ++ bfq_class_idx(next_in_service) ++ && ++ !bfq_gt(new_entity->start, st->vtime) ++ && ++ bfq_gt(next_in_service->finish, ++ new_entity->finish)) ++ || ++ new_entity_class_idx < ++ bfq_class_idx(next_in_service); ++ } ++ ++ if (replace_next) ++ next_in_service = new_entity; ++ } else /* invoked because of a deactivation: lookup needed */ ++ next_in_service = bfq_lookup_next_entity(sd); + ++ if (next_in_service) { ++ parent_sched_may_change = !sd->next_in_service || ++ bfq_update_parent_budget(next_in_service); ++ } ++ ++ sd->next_in_service = next_in_service; ++ ++ if (!next_in_service) ++ return parent_sched_may_change; + +-static void bfq_update_budget(struct bfq_entity *next_in_service) + bfqq = bfq_entity_to_bfqq(next_in_service); + if (bfqq) + bfq_log_bfqq(bfqq->bfqd, bfqq, + "update_next_in_service: chosen this queue"); ++#ifdef CONFIG_BFQ_GROUP_IOSCHED + else { + struct bfq_group *bfqg = + container_of(next_in_service, + struct bfq_group, entity); - ++ + bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, + "update_next_in_service: chosen this entity"); + } -+exit: - return 1; - } - - static void bfq_check_next_in_service(struct bfq_sched_data *sd, - struct bfq_entity *entity) - { -- BUG_ON(sd->next_in_service != entity); -+ WARN_ON(sd->next_in_service != entity); - } - #else - #define for_each_entity(entity) \ -@@ -151,20 +169,36 @@ static u64 bfq_delta(unsigned long service, unsigned long weight) - static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service) ++#endif ++ return parent_sched_may_change; ++} ++ ++#ifdef CONFIG_BFQ_GROUP_IOSCHED ++/* both next loops stop at one of the child entities of the root group */ ++#define for_each_entity(entity) \ ++ for (; entity ; entity = entity->parent) ++ ++/* ++ * For each iteration, compute parent in advance, so as to be safe if ++ * entity is deallocated during the iteration. Such a deallocation may ++ * happen as a consequence of a bfq_put_queue that frees the bfq_queue ++ * containing entity. ++ */ ++#define for_each_entity_safe(entity, parent) \ ++ for (; entity && ({ parent = entity->parent; 1; }); entity = parent) ++ ++/* ++ * Returns true if this budget changes may let next_in_service->parent ++ * become the next_in_service entity for its parent entity. ++ */ ++static bool bfq_update_parent_budget(struct bfq_entity *next_in_service) { - struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ unsigned long long start, finish, delta; - - BUG_ON(entity->weight == 0); + struct bfq_entity *bfqg_entity; + struct bfq_group *bfqg; + struct bfq_sched_data *group_sd; ++ bool ret = false; - entity->finish = entity->start + - bfq_delta(service, entity->weight); + BUG_ON(!next_in_service); -+ start = ((entity->start>>10)*1000)>>12; -+ finish = ((entity->finish>>10)*1000)>>12; -+ delta = ((bfq_delta(service, entity->weight)>>10)*1000)>>12; +@@ -41,60 +185,68 @@ static void bfq_update_budget(struct bfq_entity *next_in_service) + * as it must never become an in-service entity. + */ + bfqg_entity = bfqg->my_entity; +- if (bfqg_entity) ++ if (bfqg_entity) { ++ if (bfqg_entity->budget > next_in_service->budget) ++ ret = true; + bfqg_entity->budget = next_in_service->budget; ++ } ++ ++ return ret; + } + +-static int bfq_update_next_in_service(struct bfq_sched_data *sd) ++/* ++ * This function tells whether entity stops being a candidate for next ++ * service, according to the following logic. ++ * ++ * This function is invoked for an entity that is about to be set in ++ * service. 
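The replace_next condition above reads: the new entity displaces the cached next_in_service if it belongs to a strictly higher priority class, or to the same class while being eligible (start not past the tree's vtime) and having an earlier virtual finish time. A condensed sketch of that decision with toy types (lower class index means higher priority, RT < BE < IDLE, as in bfq):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_entity {
	unsigned int class_idx;	/* service class of the entity */
	uint64_t start;		/* virtual start time */
	uint64_t finish;	/* virtual finish time */
};

static int bfq_gt(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) > 0;
}

static bool toy_replace_next(const struct toy_entity *cur,
			     const struct toy_entity *cand,
			     uint64_t vtime /* of cand's service tree */)
{
	if (cand->class_idx < cur->class_idx)	/* higher class wins */
		return true;
	return cand->class_idx == cur->class_idx &&
	       !bfq_gt(cand->start, vtime) &&	 /* cand is eligible... */
	       bfq_gt(cur->finish, cand->finish);/* ...and finishes first */
}

int main(void)
{
	struct toy_entity cur = { .class_idx = 1, .start = 90, .finish = 220 };
	struct toy_entity cand = { .class_idx = 1, .start = 95, .finish = 180 };

	printf("replace? %d\n", toy_replace_next(&cur, &cand, 100));	/* 1 */
	return 0;
}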
If such an entity is a queue, then the entity is no longer ++ * a candidate for next service (i.e, a candidate entity to serve ++ * after the in-service entity is expired). The function then returns ++ * true. ++ * ++ * In contrast, the entity could stil be a candidate for next service ++ * if it is not a queue, and has more than one child. In fact, even if ++ * one of its children is about to be set in service, other children ++ * may still be the next to serve. As a consequence, a non-queue ++ * entity is not a candidate for next-service only if it has only one ++ * child. And only if this condition holds, then the function returns ++ * true for a non-queue entity. ++ */ ++static bool bfq_no_longer_next_in_service(struct bfq_entity *entity) + { +- struct bfq_entity *next_in_service; ++ struct bfq_group *bfqg; + +- if (sd->in_service_entity) +- /* will update/requeue at the end of service */ +- return 0; ++ if (bfq_entity_to_bfqq(entity)) ++ return true; + +- /* +- * NOTE: this can be improved in many ways, such as returning +- * 1 (and thus propagating upwards the update) only when the +- * budget changes, or caching the bfqq that will be scheduled +- * next from this subtree. By now we worry more about +- * correctness than about performance... +- */ +- next_in_service = bfq_lookup_next_entity(sd, 0, NULL); +- sd->next_in_service = next_in_service; ++ bfqg = container_of(entity, struct bfq_group, entity); + +- if (next_in_service) +- bfq_update_budget(next_in_service); ++ BUG_ON(bfqg == ((struct bfq_data *)(bfqg->bfqd))->root_group); ++ BUG_ON(bfqg->active_entities == 0); ++ if (bfqg->active_entities == 1) ++ return true; + +- return 1; ++ return false; + } + +-static void bfq_check_next_in_service(struct bfq_sched_data *sd, +- struct bfq_entity *entity) +-{ +- BUG_ON(sd->next_in_service != entity); +-} +-#else ++#else /* CONFIG_BFQ_GROUP_IOSCHED */ + #define for_each_entity(entity) \ + for (; entity ; entity = NULL) + + #define for_each_entity_safe(entity, parent) \ + for (parent = NULL; entity ; entity = parent) + +-static int bfq_update_next_in_service(struct bfq_sched_data *sd) ++static bool bfq_update_parent_budget(struct bfq_entity *next_in_service) + { +- return 0; ++ return false; + } + +-static void bfq_check_next_in_service(struct bfq_sched_data *sd, +- struct bfq_entity *entity) ++static bool bfq_no_longer_next_in_service(struct bfq_entity *entity) + { ++ return true; + } + +-static void bfq_update_budget(struct bfq_entity *next_in_service) +-{ +-} +-#endif ++#endif /* CONFIG_BFQ_GROUP_IOSCHED */ + + /* + * Shift for timestamp calculations. This actually limits the maximum +@@ -105,18 +257,6 @@ static void bfq_update_budget(struct bfq_entity *next_in_service) + */ + #define WFQ_SERVICE_SHIFT 22 + +-/** +- * bfq_gt - compare two timestamps. +- * @a: first ts. +- * @b: second ts. +- * +- * Return @a > @b, dealing with wrapping correctly. 
+- */ +-static int bfq_gt(u64 a, u64 b) +-{ +- return (s64)(a - b) > 0; +-} +- + static struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity) + { + struct bfq_queue *bfqq = NULL; +@@ -151,20 +291,36 @@ static u64 bfq_delta(unsigned long service, unsigned long weight) + static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service) + { + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); ++ unsigned long long start, finish, delta; + + BUG_ON(entity->weight == 0); + + entity->finish = entity->start + + bfq_delta(service, entity->weight); + ++ start = ((entity->start>>10)*1000)>>12; ++ finish = ((entity->finish>>10)*1000)>>12; ++ delta = ((bfq_delta(service, entity->weight)>>10)*1000)>>12; + if (bfqq) { bfq_log_bfqq(bfqq->bfqd, bfqq, @@ -5575,7 +6726,7 @@ index a5ed694..45d63d3 100644 } } -@@ -293,10 +327,26 @@ static void bfq_update_min(struct bfq_entity *entity, struct rb_node *node) +@@ -293,10 +449,26 @@ static void bfq_update_min(struct bfq_entity *entity, struct rb_node *node) static void bfq_update_active_node(struct rb_node *node) { struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node); @@ -5602,7 +6753,7 @@ index a5ed694..45d63d3 100644 } /** -@@ -386,8 +436,6 @@ static void bfq_active_insert(struct bfq_service_tree *st, +@@ -386,8 +558,6 @@ static void bfq_active_insert(struct bfq_service_tree *st, BUG_ON(!bfqg); BUG_ON(!bfqd); bfqg->active_entities++; @@ -5611,7 +6762,7 @@ index a5ed694..45d63d3 100644 } #endif } -@@ -399,7 +447,7 @@ static void bfq_active_insert(struct bfq_service_tree *st, +@@ -399,7 +569,7 @@ static void bfq_active_insert(struct bfq_service_tree *st, static unsigned short bfq_ioprio_to_weight(int ioprio) { BUG_ON(ioprio < 0 || ioprio >= IOPRIO_BE_NR); @@ -5620,7 +6771,7 @@ index a5ed694..45d63d3 100644 } /** -@@ -422,9 +470,9 @@ static void bfq_get_entity(struct bfq_entity *entity) +@@ -422,9 +592,9 @@ static void bfq_get_entity(struct bfq_entity *entity) struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); if (bfqq) { @@ -5632,7 +6783,7 @@ index a5ed694..45d63d3 100644 } } -@@ -499,10 +547,6 @@ static void bfq_active_extract(struct bfq_service_tree *st, +@@ -499,10 +669,6 @@ static void bfq_active_extract(struct bfq_service_tree *st, BUG_ON(!bfqd); BUG_ON(!bfqg->active_entities); bfqg->active_entities--; @@ -5643,16 +6794,62 @@ index a5ed694..45d63d3 100644 } #endif } -@@ -552,7 +596,7 @@ static void bfq_forget_entity(struct bfq_service_tree *st, - if (bfqq) { - sd = entity->sched_data; - bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity: %p %d", +@@ -531,28 +697,32 @@ static void bfq_idle_insert(struct bfq_service_tree *st, + } + + /** +- * bfq_forget_entity - remove an entity from the wfq trees. ++ * bfq_forget_entity - do not consider entity any longer for scheduling + * @st: the service tree. + * @entity: the entity being removed. ++ * @is_in_service: true if entity is currently the in-service entity. + * +- * Update the device status and forget everything about @entity, putting +- * the device reference to it, if it is a queue. Entities belonging to +- * groups are not refcounted. ++ * Forget everything about @entity. In addition, if entity represents ++ * a queue, and the latter is not in service, then release the service ++ * reference to the queue (the one taken through bfq_get_entity). In ++ * fact, in this case, there is really no more service reference to ++ * the queue, as the latter is also outside any service tree. 
If, ++ * instead, the queue is in service, then __bfq_bfqd_reset_in_service ++ * will take care of putting the reference when the queue finally ++ * stops being served. + */ + static void bfq_forget_entity(struct bfq_service_tree *st, +- struct bfq_entity *entity) ++ struct bfq_entity *entity, ++ bool is_in_service) + { + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); +- struct bfq_sched_data *sd; +- + BUG_ON(!entity->on_st); + +- entity->on_st = 0; ++ entity->on_st = false; + st->wsum -= entity->weight; +- if (bfqq) { +- sd = entity->sched_data; +- bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity: %p %d", - bfqq, atomic_read(&bfqq->ref)); ++ if (bfqq && !is_in_service) { ++ bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity (before): %p %d", + bfqq, bfqq->ref); bfq_put_queue(bfqq); } } -@@ -602,7 +646,7 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, +@@ -566,7 +736,8 @@ static void bfq_put_idle_entity(struct bfq_service_tree *st, + struct bfq_entity *entity) + { + bfq_idle_extract(st, entity); +- bfq_forget_entity(st, entity); ++ bfq_forget_entity(st, entity, ++ entity == entity->sched_data->in_service_entity); + } + + /** +@@ -602,7 +773,7 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, if (entity->prio_changed) { struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); @@ -5661,7 +6858,7 @@ index a5ed694..45d63d3 100644 struct bfq_data *bfqd = NULL; struct rb_root *root; #ifdef CONFIG_BFQ_GROUP_IOSCHED -@@ -630,7 +674,10 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, +@@ -630,7 +801,10 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, entity->new_weight > BFQ_MAX_WEIGHT) { pr_crit("update_weight_prio: new_weight %d\n", entity->new_weight); @@ -5673,7 +6870,7 @@ index a5ed694..45d63d3 100644 } entity->orig_weight = entity->new_weight; if (bfqq) -@@ -661,6 +708,13 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, +@@ -661,6 +835,13 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, * associated with its new weight. */ if (prev_weight != new_weight) { @@ -5687,7 +6884,7 @@ index a5ed694..45d63d3 100644 root = bfqq ? 
&bfqd->queue_weights_tree : &bfqd->group_weights_tree; bfq_weights_tree_remove(bfqd, entity, root); -@@ -707,7 +761,7 @@ static void bfq_bfqq_served(struct bfq_queue *bfqq, int served) +@@ -707,7 +888,7 @@ static void bfq_bfqq_served(struct bfq_queue *bfqq, int served) st = bfq_entity_service_tree(entity); entity->service += served; @@ -5696,7 +6893,7 @@ index a5ed694..45d63d3 100644 BUG_ON(st->wsum == 0); st->vtime += bfq_delta(served, st->wsum); -@@ -716,31 +770,69 @@ static void bfq_bfqq_served(struct bfq_queue *bfqq, int served) +@@ -716,234 +897,589 @@ static void bfq_bfqq_served(struct bfq_queue *bfqq, int served) #ifdef CONFIG_BFQ_GROUP_IOSCHED bfqg_stats_set_start_empty_time(bfqq_group(bfqq)); #endif @@ -5750,107 +6947,34 @@ index a5ed694..45d63d3 100644 + if (time_ms > 0 && time_ms < timeout_ms) + tot_serv_to_charge = + (bfqd->bfq_max_budget * time_ms) / timeout_ms; - -- bfq_log_bfqq(bfqq->bfqd, bfqq, "charge_full_budget"); ++ + if (tot_serv_to_charge < entity->service) + tot_serv_to_charge = entity->service; -- bfq_bfqq_served(bfqq, entity->budget - entity->service); +- bfq_log_bfqq(bfqq->bfqd, bfqq, "charge_full_budget"); + bfq_log_bfqq(bfqq->bfqd, bfqq, + "charge_time: %lu/%u ms, %d/%d/%d sectors", + time_ms, timeout_ms, entity->service, + tot_serv_to_charge, entity->budget); -+ + +- bfq_bfqq_served(bfqq, entity->budget - entity->service); + /* Increase budget to avoid inconsistencies */ + if (tot_serv_to_charge > entity->budget) + entity->budget = tot_serv_to_charge; + + bfq_bfqq_served(bfqq, + max_t(int, 0, tot_serv_to_charge - entity->service)); - } - - /** - * __bfq_activate_entity - activate an entity. - * @entity: the entity being activated. -+ * @non_blocking_wait_rq: true if this entity was waiting for a request - * - * Called whenever an entity is activated, i.e., it is not active and one - * of its children receives a new request, or has to be reactivated due to -@@ -748,11 +840,16 @@ static void bfq_bfqq_charge_full_budget(struct bfq_queue *bfqq) - * service received if @entity is active) of the queue to calculate its - * timestamps. - */ --static void __bfq_activate_entity(struct bfq_entity *entity) -+static void __bfq_activate_entity(struct bfq_entity *entity, -+ bool non_blocking_wait_rq) - { - struct bfq_sched_data *sd = entity->sched_data; - struct bfq_service_tree *st = bfq_entity_service_tree(entity); ++} ++ ++static void bfq_update_fin_time_enqueue(struct bfq_entity *entity, ++ struct bfq_service_tree *st, ++ bool backshifted) ++{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ bool backshifted = false; - -+ BUG_ON(!sd); -+ BUG_ON(!st); - if (entity == sd->in_service_entity) { - BUG_ON(entity->tree); - /* -@@ -770,45 +867,133 @@ static void __bfq_activate_entity(struct bfq_entity *entity) - * old start time. - */ - bfq_active_extract(st, entity); -- } else if (entity->tree == &st->idle) { -- /* -- * Must be on the idle tree, bfq_idle_extract() will -- * check for that. -- */ -- bfq_idle_extract(st, entity); -- entity->start = bfq_gt(st->vtime, entity->finish) ? -- st->vtime : entity->finish; - } else { -- /* -- * The finish time of the entity may be invalid, and -- * it is in the past for sure, otherwise the queue -- * would have been on the idle tree. 
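bfq_bfqq_charge_time() above converts wall-clock occupancy into service: a queue that held the device for time_ms out of a timeout_ms budget slice is charged that same fraction of the maximum budget, floored at the sectors it actually transferred. The arithmetic on toy numbers:

#include <stdio.h>

static int toy_serv_to_charge(int max_budget, unsigned long time_ms,
			      unsigned long timeout_ms, int service)
{
	int tot = service;	/* default: charge what was actually served */

	if (time_ms > 0 && time_ms < timeout_ms)
		tot = (int)((long long)max_budget * time_ms / timeout_ms);
	if (tot < service)	/* never charge less than the real service */
		tot = service;
	return tot;
}

int main(void)
{
	/* 16384-sector max budget, 125 ms timeout (toy values): a queue
	 * that occupied the device for 50 ms while moving only 512
	 * sectors is charged 16384 * 50 / 125 = 6553 sectors. */
	printf("charge: %d\n", toy_serv_to_charge(16384, 50, 125, 512));
	return 0;
}

This is what lets bfq penalize seeky or idling queues in proportion to the time they monopolize the device, not just the data they move.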
-- */ -- entity->start = st->vtime; -- st->wsum += entity->weight; -- bfq_get_entity(entity); -+ unsigned long long min_vstart; -+ -+ /* See comments on bfq_fqq_update_budg_for_activation */ -+ if (non_blocking_wait_rq && bfq_gt(st->vtime, entity->finish)) { -+ backshifted = true; -+ min_vstart = entity->finish; -+ } else -+ min_vstart = st->vtime; ++ struct bfq_sched_data *sd = entity->sched_data; + -+ if (entity->tree == &st->idle) { -+ /* -+ * Must be on the idle tree, bfq_idle_extract() will -+ * check for that. -+ */ -+ bfq_idle_extract(st, entity); -+ entity->start = bfq_gt(min_vstart, entity->finish) ? -+ min_vstart : entity->finish; -+ } else { -+ /* -+ * The finish time of the entity may be invalid, and -+ * it is in the past for sure, otherwise the queue -+ * would have been on the idle tree. -+ */ -+ entity->start = min_vstart; -+ st->wsum += entity->weight; -+ bfq_get_entity(entity); - -- BUG_ON(entity->on_st); -- entity->on_st = 1; -+ BUG_ON(entity->on_st); -+ entity->on_st = 1; -+ } - } - - st = __bfq_entity_update_weight_prio(st, entity); - bfq_calc_finish(entity, entity->budget); ++ st = __bfq_entity_update_weight_prio(st, entity); ++ bfq_calc_finish(entity, entity->budget); + + /* + * If some queues enjoy backshifting for a while, then their @@ -5910,7 +7034,7 @@ index a5ed694..45d63d3 100644 + } + } + - bfq_active_insert(st, entity); ++ bfq_active_insert(st, entity); + + if (bfqq) { + bfq_log_bfqq(bfqq->bfqd, bfqq, @@ -5926,70 +7050,479 @@ index a5ed694..45d63d3 100644 + entity->start <= st->vtime ? "" : "non ", st); +#endif + } ++ BUG_ON(RB_EMPTY_ROOT(&st->active)); ++ BUG_ON(&st->active != &sd->service_tree->active && ++ &st->active != &(sd->service_tree+1)->active && ++ &st->active != &(sd->service_tree+2)->active); } /** - * bfq_activate_entity - activate an entity and its ancestors if necessary. - * @entity: the entity to activate. -+ * @non_blocking_wait_rq: true if this entity was waiting for a request +- * __bfq_activate_entity - activate an entity. ++ * __bfq_activate_entity - handle activation of entity. + * @entity: the entity being activated. ++ * @non_blocking_wait_rq: true if entity was waiting for a request ++ * ++ * Called for a 'true' activation, i.e., if entity is not active and ++ * one of its children receives a new request. * - * Activate @entity and all the entities on the path from it to the root. +- * Called whenever an entity is activated, i.e., it is not active and one +- * of its children receives a new request, or has to be reactivated due to +- * budget exhaustion. It uses the current budget of the entity (and the +- * service received if @entity is active) of the queue to calculate its +- * timestamps. ++ * Basically, this function updates the timestamps of entity and ++ * inserts entity into its active tree, ater possible extracting it ++ * from its idle tree. + */ +-static void __bfq_activate_entity(struct bfq_entity *entity) ++static void __bfq_activate_entity(struct bfq_entity *entity, ++ bool non_blocking_wait_rq) + { + struct bfq_sched_data *sd = entity->sched_data; + struct bfq_service_tree *st = bfq_entity_service_tree(entity); ++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); ++ bool backshifted = false; ++ unsigned long long min_vstart; + +- if (entity == sd->in_service_entity) { +- BUG_ON(entity->tree); +- /* +- * If we are requeueing the current entity we have +- * to take care of not charging to it service it has +- * not received. 
+- */ +- bfq_calc_finish(entity, entity->service); +- entity->start = entity->finish; +- sd->in_service_entity = NULL; +- } else if (entity->tree == &st->active) { +- /* +- * Requeueing an entity due to a change of some +- * next_in_service entity below it. We reuse the +- * old start time. +- */ +- bfq_active_extract(st, entity); +- } else if (entity->tree == &st->idle) { ++ BUG_ON(!sd); ++ BUG_ON(!st); ++ ++ /* See comments on bfq_fqq_update_budg_for_activation */ ++ if (non_blocking_wait_rq && bfq_gt(st->vtime, entity->finish)) { ++ backshifted = true; ++ min_vstart = entity->finish; ++ } else ++ min_vstart = st->vtime; ++ ++ if (entity->tree == &st->idle) { + /* + * Must be on the idle tree, bfq_idle_extract() will + * check for that. + */ + bfq_idle_extract(st, entity); +- entity->start = bfq_gt(st->vtime, entity->finish) ? +- st->vtime : entity->finish; ++ entity->start = bfq_gt(min_vstart, entity->finish) ? ++ min_vstart : entity->finish; + } else { + /* + * The finish time of the entity may be invalid, and + * it is in the past for sure, otherwise the queue + * would have been on the idle tree. + */ +- entity->start = st->vtime; ++ entity->start = min_vstart; + st->wsum += entity->weight; ++ /* ++ * entity is about to be inserted into a service tree, ++ * and then set in service: get a reference to make ++ * sure entity does not disappear until it is no ++ * longer in service or scheduled for service. ++ */ + bfq_get_entity(entity); + +- BUG_ON(entity->on_st); +- entity->on_st = 1; ++ BUG_ON(entity->on_st && bfqq); ++ ++#ifdef CONFIG_BFQ_GROUP_IOSCHED ++ if (entity->on_st && !bfqq) { ++ struct bfq_group *bfqg = ++ container_of(entity, struct bfq_group, ++ entity); ++ ++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, ++ bfqg, ++ "activate bug, class %d in_service %p", ++ bfq_class_idx(entity), sd->in_service_entity); ++ } ++#endif ++ BUG_ON(entity->on_st && !bfqq); ++ entity->on_st = true; + } + +- st = __bfq_entity_update_weight_prio(st, entity); +- bfq_calc_finish(entity, entity->budget); +- bfq_active_insert(st, entity); ++ bfq_update_fin_time_enqueue(entity, st, backshifted); + } + + /** +- * bfq_activate_entity - activate an entity and its ancestors if necessary. +- * @entity: the entity to activate. ++ * __bfq_requeue_entity - handle requeueing or repositioning of an entity. ++ * @entity: the entity being requeued or repositioned. ++ * ++ * Requeueing is needed if this entity stops being served, which ++ * happens if a leaf descendant entity has expired. On the other hand, ++ * repositioning is needed if the next_inservice_entity for the child ++ * entity has changed. See the comments inside the function for ++ * details. + * +- * Activate @entity and all the entities on the path from it to the root. ++ * Basically, this function: 1) removes entity from its active tree if ++ * present there, 2) updates the timestamps of entity and 3) inserts ++ * entity back into its active tree (in the new, right position for ++ * the new values of the timestamps). 
++ */ ++static void __bfq_requeue_entity(struct bfq_entity *entity) ++{ ++ struct bfq_sched_data *sd = entity->sched_data; ++ struct bfq_service_tree *st = bfq_entity_service_tree(entity); ++ ++ BUG_ON(!sd); ++ BUG_ON(!st); ++ ++ BUG_ON(entity != sd->in_service_entity && ++ entity->tree != &st->active); ++ ++ if (entity == sd->in_service_entity) { ++ /* ++ * We are requeueing the current in-service entity, ++ * which may have to be done for one of the following ++ * reasons: ++ * - entity represents the in-service queue, and the ++ * in-service queue is being requeued after an ++ * expiration; ++ * - entity represents a group, and its budget has ++ * changed because one of its child entities has ++ * just been either activated or requeued for some ++ * reason; the timestamps of the entity need then to ++ * be updated, and the entity needs to be enqueued ++ * or repositioned accordingly. ++ * ++ * In particular, before requeueing, the start time of ++ * the entity must be moved forward to account for the ++ * service that the entity has received while in ++ * service. This is done by the next instructions. The ++ * finish time will then be updated according to this ++ * new value of the start time, and to the budget of ++ * the entity. ++ */ ++ bfq_calc_finish(entity, entity->service); ++ entity->start = entity->finish; ++ BUG_ON(entity->tree && entity->tree != &st->active); ++ /* ++ * In addition, if the entity had more than one child ++ * when set in service, then was not extracted from ++ * the active tree. This implies that the position of ++ * the entity in the active tree may need to be ++ * changed now, because we have just updated the start ++ * time of the entity, and we will update its finish ++ * time in a moment (the requeueing is then, more ++ * precisely, a repositioning in this case). To ++ * implement this repositioning, we: 1) dequeue the ++ * entity here, 2) update the finish time and ++ * requeue the entity according to the new ++ * timestamps below. ++ */ ++ if (entity->tree) ++ bfq_active_extract(st, entity); ++ } else { /* The entity is already active, and not in service */ ++ /* ++ * In this case, this function gets called only if the ++ * next_in_service entity below this entity has ++ * changed, and this change has caused the budget of ++ * this entity to change, which, finally implies that ++ * the finish time of this entity must be ++ * updated. Such an update may cause the scheduling, ++ * i.e., the position in the active tree, of this ++ * entity to change. We handle this change by: 1) ++ * dequeueing the entity here, 2) updating the finish ++ * time and requeueing the entity according to the new ++ * timestamps below. This is the same approach as the ++ * non-extracted-entity sub-case above. ++ */ ++ bfq_active_extract(st, entity); ++ } ++ ++ bfq_update_fin_time_enqueue(entity, st, false); ++} ++ ++static void __bfq_activate_requeue_entity(struct bfq_entity *entity, ++ struct bfq_sched_data *sd, ++ bool non_blocking_wait_rq) ++{ ++ struct bfq_service_tree *st = bfq_entity_service_tree(entity); ++ ++ if (sd->in_service_entity == entity || entity->tree == &st->active) ++ /* ++ * in service or already queued on the active tree, ++ * requeue or reposition ++ */ ++ __bfq_requeue_entity(entity); ++ else ++ /* ++ * Not in service and not queued on its active tree: ++ * the activity is idle and this is a true activation. 
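For the in-service entity, __bfq_requeue_entity() above advances timestamps in two steps: close the current round by recomputing the finish time from the service actually received and moving the start time up to it, then derive a fresh finish time from the full budget. A toy run of those steps, assuming bfq_delta() is the usual service value scaled by 2^WFQ_SERVICE_SHIFT over the weight (the shift is defined as 22 earlier in this file):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define WFQ_SERVICE_SHIFT 22	/* as in bfq-sched.c */

/* Assumed bfq_delta(): service scaled by 2^22 over the weight. */
static uint64_t bfq_delta(unsigned long service, unsigned long weight)
{
	return ((uint64_t)service << WFQ_SERVICE_SHIFT) / weight;
}

struct toy_entity {
	uint64_t start, finish;
	unsigned long weight;
	int budget, service;
};

static void toy_requeue(struct toy_entity *e)
{
	/* close the current round at the service received... */
	e->finish = e->start + bfq_delta(e->service, e->weight);
	e->start = e->finish;
	e->service = 0;		/* toy bookkeeping for the next round */
	/* ...and schedule the next round over the full budget */
	e->finish = e->start + bfq_delta(e->budget, e->weight);
}

int main(void)
{
	struct toy_entity e = { .start = 0, .weight = 100,
				.budget = 8192, .service = 1024 };

	toy_requeue(&e);
	printf("start %" PRIu64 ", finish %" PRIu64 "\n", e.start, e.finish);
	return 0;
}

An entity is thus never credited for service it did not receive: its start time only advances by what it actually consumed.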
++ */ ++ __bfq_activate_entity(entity, non_blocking_wait_rq); ++} ++ ++ ++/** ++ * bfq_activate_entity - activate or requeue an entity representing a bfq_queue, ++ * and activate, requeue or reposition all ancestors ++ * for which such an update becomes necessary. ++ * @entity: the entity to activate. ++ * @non_blocking_wait_rq: true if this entity was waiting for a request ++ * @requeue: true if this is a requeue, which implies that bfqq is ++ * being expired; thus ALL its ancestors stop being served and must ++ * therefore be requeued */ -static void bfq_activate_entity(struct bfq_entity *entity) -+static void bfq_activate_entity(struct bfq_entity *entity, -+ bool non_blocking_wait_rq) ++static void bfq_activate_requeue_entity(struct bfq_entity *entity, ++ bool non_blocking_wait_rq, ++ bool requeue) { struct bfq_sched_data *sd; for_each_entity(entity) { - __bfq_activate_entity(entity); +- + BUG_ON(!entity); -+ __bfq_activate_entity(entity, non_blocking_wait_rq); + sd = entity->sched_data; +- if (!bfq_update_next_in_service(sd)) +- /* +- * No need to propagate the activation to the +- * upper entities, as they will be updated when +- * the in-service entity is rescheduled. +- */ ++ __bfq_activate_requeue_entity(entity, sd, non_blocking_wait_rq); ++ ++ BUG_ON(RB_EMPTY_ROOT(&sd->service_tree->active) && ++ RB_EMPTY_ROOT(&(sd->service_tree+1)->active) && ++ RB_EMPTY_ROOT(&(sd->service_tree+2)->active)); ++ ++ if (!bfq_update_next_in_service(sd, entity) && !requeue) { ++ BUG_ON(!sd->next_in_service); + break; ++ } ++ BUG_ON(!sd->next_in_service); + } + } + + /** + * __bfq_deactivate_entity - deactivate an entity from its service tree. + * @entity: the entity to deactivate. +- * @requeue: if false, the entity will not be put into the idle tree. ++ * @ins_into_idle_tree: if false, the entity will not be put into the ++ * idle tree. + * +- * Deactivate an entity, independently from its previous state. If the +- * entity was not on a service tree just return, otherwise if it is on +- * any scheduler tree, extract it from that tree, and if necessary +- * and if the caller did not specify @requeue, put it on the idle tree. +- * +- * Return %1 if the caller should update the entity hierarchy, i.e., +- * if the entity was in service or if it was the next_in_service for +- * its sched_data; return %0 otherwise. ++ * Deactivates an entity, independently from its previous state. Must ++ * be invoked only if entity is on a service tree. Extracts the entity ++ * from that tree, and if necessary and allowed, puts it on the idle ++ * tree. + */ +-static int __bfq_deactivate_entity(struct bfq_entity *entity, int requeue) ++static bool __bfq_deactivate_entity(struct bfq_entity *entity, ++ bool ins_into_idle_tree) + { + struct bfq_sched_data *sd = entity->sched_data; + struct bfq_service_tree *st; +- int was_in_service; +- int ret = 0; ++ bool is_in_service; + +- if (sd == NULL || !entity->on_st) /* never activated, or inactive */ +- return 0; ++ if (!entity->on_st) { /* entity never activated, or already inactive */ ++ BUG_ON(sd && entity == sd->in_service_entity); ++ return false; ++ } + ++ /* ++ * If we get here, then entity is active, which implies that ++ * bfq_group_set_parent has already been invoked for the group ++ * represented by entity. Therefore, the field ++ * entity->sched_data has been set, and we can safely use it. 
++ */ + st = bfq_entity_service_tree(entity); +- was_in_service = entity == sd->in_service_entity; ++ is_in_service = entity == sd->in_service_entity; + +- BUG_ON(was_in_service && entity->tree); ++ BUG_ON(is_in_service && entity->tree && entity->tree != &st->active); + +- if (was_in_service) { ++ if (is_in_service) + bfq_calc_finish(entity, entity->service); +- sd->in_service_entity = NULL; +- } else if (entity->tree == &st->active) ++ ++ if (entity->tree == &st->active) + bfq_active_extract(st, entity); +- else if (entity->tree == &st->idle) ++ else if (!is_in_service && entity->tree == &st->idle) + bfq_idle_extract(st, entity); + else if (entity->tree) + BUG(); +- if (was_in_service || sd->next_in_service == entity) +- ret = bfq_update_next_in_service(sd); +- +- if (!requeue || !bfq_gt(entity->finish, st->vtime)) +- bfq_forget_entity(st, entity); ++ if (!ins_into_idle_tree || !bfq_gt(entity->finish, st->vtime)) ++ bfq_forget_entity(st, entity, is_in_service); + else + bfq_idle_insert(st, entity); + +- BUG_ON(sd->in_service_entity == entity); +- BUG_ON(sd->next_in_service == entity); +- +- return ret; ++ return true; + } + + /** +- * bfq_deactivate_entity - deactivate an entity. ++ * bfq_deactivate_entity - deactivate an entity representing a bfq_queue. + * @entity: the entity to deactivate. +- * @requeue: true if the entity can be put on the idle tree ++ * @ins_into_idle_tree: true if the entity can be put on the idle tree + */ +-static void bfq_deactivate_entity(struct bfq_entity *entity, int requeue) ++static void bfq_deactivate_entity(struct bfq_entity *entity, ++ bool ins_into_idle_tree, ++ bool expiration) + { + struct bfq_sched_data *sd; +- struct bfq_entity *parent; ++ struct bfq_entity *parent = NULL; + + for_each_entity_safe(entity, parent) { sd = entity->sched_data; - if (!bfq_update_next_in_service(sd)) -@@ -889,23 +1074,24 @@ static void bfq_deactivate_entity(struct bfq_entity *entity, int requeue) - if (!__bfq_deactivate_entity(entity, requeue)) +- if (!__bfq_deactivate_entity(entity, requeue)) ++ BUG_ON(sd == NULL); /* ++ * It would mean that this is the ++ * root group. ++ */ ++ ++ BUG_ON(expiration && entity != sd->in_service_entity); ++ ++ BUG_ON(entity != sd->in_service_entity && ++ entity->tree == ++ &bfq_entity_service_tree(entity)->active && ++ !sd->next_in_service); ++ ++ if (!__bfq_deactivate_entity(entity, ins_into_idle_tree)) { /* - * The parent entity is still backlogged, and - * we don't need to update it as it is still - * in service. -+ * next_in_service has not been changed, so -+ * no upwards update is needed ++ * entity is not in any tree any more, so ++ * this deactivation is a no-op, and there is ++ * nothing to change for upper-level entities ++ * (in case of expiration, this can never ++ * happen). */ - break; +- break; ++ BUG_ON(expiration); /* ++ * entity cannot be already out of ++ * any tree ++ */ ++ return; ++ } ++ ++ if (sd->next_in_service == entity) ++ /* ++ * entity was the next_in_service entity, ++ * then, since entity has just been ++ * deactivated, a new one must be found. ++ */ ++ bfq_update_next_in_service(sd, NULL); - if (sd->next_in_service) +- if (sd->next_in_service) ++ if (sd->next_in_service) { /* - * The parent entity is still backlogged and - * the budgets on the path towards the root - * need to be updated. + * The parent entity is still backlogged, -+ * because next_in_service is not NULL, and -+ * next_in_service has been updated (see -+ * comment on the body of the above if): -+ * upwards update of the schedule is needed. 
++ * because next_in_service is not NULL. So, no ++ * further upwards deactivation must be ++ * performed. Yet, next_in_service has ++ * changed. Then the schedule does need to be ++ * updated upwards. */ - goto update; +- goto update; ++ BUG_ON(sd->next_in_service == entity); ++ break; ++ } /* - * If we reach there the parent is no more backlogged and - * we want to propagate the dequeue upwards. -+ * If we get here, then the parent is no more backlogged and -+ * we want to propagate the deactivation upwards. ++ * If we get here, then the parent is no more ++ * backlogged and we need to propagate the ++ * deactivation upwards. Thus let the loop go on. */ - requeue = 1; - } -@@ -915,9 +1101,23 @@ static void bfq_deactivate_entity(struct bfq_entity *entity, int requeue) - update: +- requeue = 1; +- } + +- return; ++ /* ++ * Also let parent be queued into the idle tree on ++ * deactivation, to preserve service guarantees, and ++ * assuming that who invoked this function does not ++ * need parent entities too to be removed completely. ++ */ ++ ins_into_idle_tree = true; ++ } + +-update: ++ /* ++ * If the deactivation loop is fully executed, then there are ++ * no more entities to touch and next loop is not executed at ++ * all. Otherwise, requeue remaining entities if they are ++ * about to stop receiving service, or reposition them if this ++ * is not the case. ++ */ entity = parent; for_each_entity(entity) { - __bfq_activate_entity(entity); + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ __bfq_activate_entity(entity, false); ++ ++ /* ++ * Invoke __bfq_requeue_entity on entity, even if ++ * already active, to requeue/reposition it in the ++ * active tree (because sd->next_in_service has ++ * changed) ++ */ ++ __bfq_requeue_entity(entity); sd = entity->sched_data; +- if (!bfq_update_next_in_service(sd)) ++ BUG_ON(expiration && sd->in_service_entity != entity); ++ + if (bfqq) + bfq_log_bfqq(bfqq->bfqd, bfqq, + "invoking udpdate_next for this queue"); @@ -6003,57 +7536,195 @@ index a5ed694..45d63d3 100644 + "invoking udpdate_next for this entity"); + } +#endif - if (!bfq_update_next_in_service(sd)) ++ if (!bfq_update_next_in_service(sd, entity) && ++ !expiration) ++ /* ++ * next_in_service unchanged or not causing ++ * any change in entity->parent->sd, and no ++ * requeueing needed for expiration: stop ++ * here. ++ */ break; } -@@ -943,7 +1143,23 @@ static void bfq_update_vtime(struct bfq_service_tree *st) + } - entry = rb_entry(node, struct bfq_entity, rb_node); - if (bfq_gt(entry->min_start, st->vtime)) { -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entry); - st->vtime = entry->min_start; + /** +- * bfq_update_vtime - update vtime if necessary. ++ * bfq_calc_vtime_jump - compute the value to which the vtime should jump, ++ * if needed, to have at least one entity eligible. + * @st: the service tree to act upon. + * +- * If necessary update the service tree vtime to have at least one +- * eligible entity, skipping to its start time. Assumes that the +- * active tree of the device is not empty. +- * +- * NOTE: this hierarchical implementation updates vtimes quite often, +- * we may end up with reactivated processes getting timestamps after a +- * vtime skip done because we needed a ->first_active entity on some +- * intermediate node. ++ * Assumes that st is not empty. 
+ */ +-static void bfq_update_vtime(struct bfq_service_tree *st) ++static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st) + { +- struct bfq_entity *entry; +- struct rb_node *node = st->active.rb_node; ++ struct bfq_entity *root_entity = bfq_root_active_entity(&st->active); + ++ if (bfq_gt(root_entity->min_start, st->vtime)) { ++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(root_entity); + +- entry = rb_entry(node, struct bfq_entity, rb_node); +- if (bfq_gt(entry->min_start, st->vtime)) { +- st->vtime = entry->min_start; + if (bfqq) + bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "update_vtime: new vtime %llu %p", -+ ((st->vtime>>10)*1000)>>12, st); ++ "calc_vtime_jump: new value %llu", ++ root_entity->min_start); +#ifdef CONFIG_BFQ_GROUP_IOSCHED + else { + struct bfq_group *bfqg = -+ container_of(entry, struct bfq_group, entity); ++ container_of(root_entity, struct bfq_group, ++ entity); + + bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "update_vtime: new vtime %llu %p", -+ ((st->vtime>>10)*1000)>>12, st); ++ "calc_vtime_jump: new value %llu", ++ root_entity->min_start); + } +#endif ++ return root_entity->min_start; ++ } ++ return st->vtime; ++} ++ ++static void bfq_update_vtime(struct bfq_service_tree *st, u64 new_value) ++{ ++ if (new_value > st->vtime) { ++ st->vtime = new_value; bfq_forget_idle(st); } } -@@ -996,10 +1212,11 @@ left: - * Update the virtual time in @st and return the first eligible entity - * it contains. +@@ -952,6 +1488,7 @@ static void bfq_update_vtime(struct bfq_service_tree *st) + * bfq_first_active_entity - find the eligible entity with + * the smallest finish time + * @st: the service tree to select from. ++ * @vtime: the system virtual to use as a reference for eligibility + * + * This function searches the first schedulable entity, starting from the + * root of the tree and going on the left every time on this side there is +@@ -959,7 +1496,8 @@ static void bfq_update_vtime(struct bfq_service_tree *st) + * the right is followed only if a) the left subtree contains no eligible + * entities and b) no eligible entity has been found yet. + */ +-static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st) ++static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st, ++ u64 vtime) + { + struct bfq_entity *entry, *first = NULL; + struct rb_node *node = st->active.rb_node; +@@ -967,15 +1505,15 @@ static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st) + while (node) { + entry = rb_entry(node, struct bfq_entity, rb_node); + left: +- if (!bfq_gt(entry->start, st->vtime)) ++ if (!bfq_gt(entry->start, vtime)) + first = entry; + +- BUG_ON(bfq_gt(entry->min_start, st->vtime)); ++ BUG_ON(bfq_gt(entry->min_start, vtime)); + + if (node->rb_left) { + entry = rb_entry(node->rb_left, + struct bfq_entity, rb_node); +- if (!bfq_gt(entry->min_start, st->vtime)) { ++ if (!bfq_gt(entry->min_start, vtime)) { + node = node->rb_left; + goto left; + } +@@ -993,31 +1531,84 @@ static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st) + * __bfq_lookup_next_entity - return the first eligible entity in @st. + * @st: the service tree. + * +- * Update the virtual time in @st and return the first eligible entity +- * it contains. 
++ * If there is no in-service entity for the sched_data st belongs to, ++ * then return the entity that will be set in service if: ++ * 1) the parent entity this st belongs to is set in service; ++ * 2) no entity belonging to such parent entity undergoes a state change ++ * that would influence the timestamps of the entity (e.g., becomes idle, ++ * becomes backlogged, changes its budget, ...). ++ * ++ * In this first case, update the virtual time in @st too (see the ++ * comments on this update inside the function). ++ * ++ * In constrast, if there is an in-service entity, then return the ++ * entity that would be set in service if not only the above ++ * conditions, but also the next one held true: the currently ++ * in-service entity, on expiration, ++ * 1) gets a finish time equal to the current one, or ++ * 2) is not eligible any more, or ++ * 3) is idle. */ -static struct bfq_entity *__bfq_lookup_next_entity(struct bfq_service_tree *st, - bool force) +static struct bfq_entity * -+__bfq_lookup_next_entity(struct bfq_service_tree *st, bool force) ++__bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service ++#if 0 ++ , bool force ++#endif ++ ) { - struct bfq_entity *entity, *new_next_in_service = NULL; +- struct bfq_entity *entity, *new_next_in_service = NULL; ++ struct bfq_entity *entity ++#if 0 ++ , *new_next_in_service = NULL ++#endif ++ ; ++ u64 new_vtime; + struct bfq_queue *bfqq; if (RB_EMPTY_ROOT(&st->active)) return NULL; -@@ -1008,6 +1225,24 @@ static struct bfq_entity *__bfq_lookup_next_entity(struct bfq_service_tree *st, - entity = bfq_first_active_entity(st); - BUG_ON(bfq_gt(entity->start, st->vtime)); +- bfq_update_vtime(st); +- entity = bfq_first_active_entity(st); +- BUG_ON(bfq_gt(entity->start, st->vtime)); ++ /* ++ * Get the value of the system virtual time for which at ++ * least one entity is eligible. ++ */ ++ new_vtime = bfq_calc_vtime_jump(st); + + /* +- * If the chosen entity does not match with the sched_data's +- * next_in_service and we are forcedly serving the IDLE priority +- * class tree, bubble up budget update. ++ * If there is no in-service entity for the sched_data this ++ * active tree belongs to, then push the system virtual time ++ * up to the value that guarantees that at least one entity is ++ * eligible. If, instead, there is an in-service entity, then ++ * do not make any such update, because there is already an ++ * eligible entity, namely the in-service one (even if the ++ * entity is not on st, because it was extracted when set in ++ * service). 
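
Stripped of the logging and BUG_ON checks, the lookup described above reduces to two branches; a sketch under the same names (the extern prototypes are stand-ins for helpers defined elsewhere in this patch):

    #include <stdbool.h>
    #include <stdint.h>

    struct service_tree;
    struct entity;

    /* Stand-ins for the patch's helpers. */
    extern bool active_tree_empty(struct service_tree *st);
    extern uint64_t calc_vtime_jump(struct service_tree *st);
    extern void update_vtime(struct service_tree *st, uint64_t new_value);
    extern struct entity *first_active_entity(struct service_tree *st,
                                              uint64_t vtime);

    static struct entity *lookup_next(struct service_tree *st, bool in_service)
    {
        uint64_t new_vtime;

        if (active_tree_empty(st))
            return NULL;
        new_vtime = calc_vtime_jump(st);
        if (!in_service)
            update_vtime(st, new_vtime); /* commit the jump: nothing served */
        return first_active_entity(st, new_vtime);
    }

The asymmetry is the point: an in-service entity is eligible by construction (it was extracted from the tree when set in service), so the vtime must not be pushed forward on its behalf.
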
+ */ +- if (unlikely(force && entity != entity->sched_data->next_in_service)) { +- new_next_in_service = entity; +- for_each_entity(new_next_in_service) +- bfq_update_budget(new_next_in_service); ++ if (!in_service) ++ bfq_update_vtime(st, new_vtime); ++ ++ entity = bfq_first_active_entity(st, new_vtime); ++ BUG_ON(bfq_gt(entity->start, new_vtime)); ++ ++ /* Log some information */ + bfqq = bfq_entity_to_bfqq(entity); + if (bfqq) + bfq_log_bfqq(bfqq->bfqd, bfqq, + "__lookup_next: start %llu vtime %llu st %p", + ((entity->start>>10)*1000)>>12, -+ ((st->vtime>>10)*1000)>>12, st); ++ ((new_vtime>>10)*1000)>>12, st); +#ifdef CONFIG_BFQ_GROUP_IOSCHED + else { + struct bfq_group *bfqg = @@ -6062,78 +7733,115 @@ index a5ed694..45d63d3 100644 + bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, + "__lookup_next: start %llu vtime %llu st %p", + ((entity->start>>10)*1000)>>12, -+ ((st->vtime>>10)*1000)>>12, st); -+ } ++ ((new_vtime>>10)*1000)>>12, st); + } +#endif + - /* - * If the chosen entity does not match with the sched_data's - * next_in_service and we are forcedly serving the IDLE priority -@@ -1043,11 +1278,36 @@ static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, ++ BUG_ON(!entity); - BUG_ON(sd->in_service_entity); + return entity; + } +@@ -1025,50 +1616,81 @@ static struct bfq_entity *__bfq_lookup_next_entity(struct bfq_service_tree *st, + /** + * bfq_lookup_next_entity - return the first eligible entity in @sd. + * @sd: the sched_data. +- * @extract: if true the returned entity will be also extracted from @sd. + * +- * NOTE: since we cache the next_in_service entity at each level of the +- * hierarchy, the complexity of the lookup can be decreased with +- * absolutely no effort just returning the cached next_in_service value; +- * we prefer to do full lookups to test the consistency of * the data +- * structures. ++ * This function is invoked when there has been a change in the trees ++ * for sd, and we need know what is the new next entity after this ++ * change. + */ +-static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, +- int extract, +- struct bfq_data *bfqd) ++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd) + { + struct bfq_service_tree *st = sd->service_tree; +- struct bfq_entity *entity; +- int i = 0; +- +- BUG_ON(sd->in_service_entity); ++ struct bfq_service_tree *idle_class_st = st + (BFQ_IOPRIO_CLASSES - 1); ++ struct bfq_entity *entity = NULL; ++ struct bfq_queue *bfqq; ++ int class_idx = 0; +- if (bfqd && +- jiffies - bfqd->bfq_class_idle_last_service > BFQ_CL_IDLE_TIMEOUT) { +- entity = __bfq_lookup_next_entity(st + BFQ_IOPRIO_CLASSES - 1, +- true); +- if (entity) { +- i = BFQ_IOPRIO_CLASSES - 1; +- bfqd->bfq_class_idle_last_service = jiffies; +- sd->next_in_service = entity; +- } ++ BUG_ON(!sd); ++ BUG_ON(!st); + /* + * Choose from idle class, if needed to guarantee a minimum -+ * bandwidth to this class. This should also mitigate ++ * bandwidth to this class (and if there is some active entity ++ * in idle class). This should also mitigate + * priority-inversion problems in case a low priority task is + * holding file system resources. 
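
Reduced to its core, that class-selection rule reads roughly as follows (constants and the time comparison are simplified; the patch itself uses BFQ_IOPRIO_CLASSES, time_is_before_jiffies() and a per-sched_data timestamp):

    #include <stdbool.h>

    enum { CLASS_RT = 0, CLASS_BE = 1, CLASS_IDLE = 2 };

    /*
     * Normally scan from the highest-priority class down; start at
     * CLASS_IDLE instead when it has been starved past the timeout and
     * actually has an active entity, guaranteeing it a minimum share.
     */
    static int first_class_to_scan(bool idle_has_active, unsigned long now,
                                   unsigned long idle_last_service,
                                   unsigned long timeout)
    {
        if (now - idle_last_service > timeout && idle_has_active)
            return CLASS_IDLE;
        return CLASS_RT;
    }
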
+ */ - if (bfqd && -- jiffies - bfqd->bfq_class_idle_last_service > BFQ_CL_IDLE_TIMEOUT) { -+ jiffies - bfqd->bfq_class_idle_last_service > -+ BFQ_CL_IDLE_TIMEOUT) { - entity = __bfq_lookup_next_entity(st + BFQ_IOPRIO_CLASSES - 1, - true); - if (entity) { -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); ++ if (time_is_before_jiffies(sd->bfq_class_idle_last_service + ++ BFQ_CL_IDLE_TIMEOUT)) { ++ if (!RB_EMPTY_ROOT(&idle_class_st->active)) ++ class_idx = BFQ_IOPRIO_CLASSES - 1; ++ /* About to be served if backlogged, or not yet backlogged */ ++ sd->bfq_class_idle_last_service = jiffies; + } +- for (; i < BFQ_IOPRIO_CLASSES; i++) { +- entity = __bfq_lookup_next_entity(st + i, false); +- if (entity) { +- if (extract) { +- bfq_check_next_in_service(sd, entity); +- bfq_active_extract(st + i, entity); +- sd->in_service_entity = entity; +- sd->next_in_service = NULL; +- } + -+ if (bfqq) -+ bfq_log_bfqq(bfqd, bfqq, -+ "idle chosen from st %p %d", -+ st + BFQ_IOPRIO_CLASSES - 1, -+ BFQ_IOPRIO_CLASSES - 1); -+#ifdef CONFIG_BFQ_GROUP_IOSCHED -+ else { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); ++ /* ++ * Find the next entity to serve for the highest-priority ++ * class, unless the idle class needs to be served. ++ */ ++ for (; class_idx < BFQ_IOPRIO_CLASSES; class_idx++) { ++ entity = __bfq_lookup_next_entity(st + class_idx, ++ sd->in_service_entity); + -+ bfq_log_bfqg(bfqd, bfqg, -+ "idle chosen from st %p %d", -+ st + BFQ_IOPRIO_CLASSES - 1, -+ BFQ_IOPRIO_CLASSES - 1); -+ } -+#endif - i = BFQ_IOPRIO_CLASSES - 1; - bfqd->bfq_class_idle_last_service = jiffies; - sd->next_in_service = entity; -@@ -1056,6 +1316,25 @@ static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, - for (; i < BFQ_IOPRIO_CLASSES; i++) { - entity = __bfq_lookup_next_entity(st + i, false); - if (entity) { -+ if (bfqd != NULL) { -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); ++ if (entity) + break; +- } + } + ++ BUG_ON(!entity && ++ (!RB_EMPTY_ROOT(&st->active) || !RB_EMPTY_ROOT(&(st+1)->active) || ++ !RB_EMPTY_ROOT(&(st+2)->active))); + -+ if (bfqq) -+ bfq_log_bfqq(bfqd, bfqq, -+ "chosen from st %p %d", -+ st + i, i); ++ if (!entity) ++ return NULL; ++ ++ /* Log some information */ ++ bfqq = bfq_entity_to_bfqq(entity); ++ if (bfqq) ++ bfq_log_bfqq(bfqq->bfqd, bfqq, "chosen from st %p %d", ++ st + class_idx, class_idx); +#ifdef CONFIG_BFQ_GROUP_IOSCHED -+ else { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); ++ else { ++ struct bfq_group *bfqg = ++ container_of(entity, struct bfq_group, entity); + -+ bfq_log_bfqg(bfqd, bfqg, -+ "chosen from st %p %d", -+ st + i, i); -+ } ++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, ++ "chosen from st %p %d", ++ st + class_idx, class_idx); ++ } +#endif -+ } + - if (extract) { - bfq_check_next_in_service(sd, entity); - bfq_active_extract(st + i, entity); -@@ -1069,6 +1348,13 @@ static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, return entity; } @@ -6147,10 +7855,19 @@ index a5ed694..45d63d3 100644 /* * Get next queue for service. */ -@@ -1085,7 +1371,36 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) +@@ -1083,58 +1705,218 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) + if (bfqd->busy_queues == 0) + return NULL; ++ /* ++ * Traverse the path from the root to the leaf entity to ++ * serve. Set in service all the entities visited along the ++ * way. 
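
The descent announced by this comment has the following shape (fields trimmed to what the walk touches; my_sched_data is NULL exactly when the entity is a leaf, i.e. a queue):

    struct sched_data;

    struct entity {
        struct sched_data *my_sched_data; /* NULL for a leaf (queue) */
        int service;
    };

    struct sched_data {
        struct entity *in_service_entity;
        struct entity *next_in_service;
    };

    /* Promote each level's cached next_in_service while walking down. */
    static struct entity *descend_to_leaf(struct sched_data *root)
    {
        struct entity *entity = NULL;
        struct sched_data *sd;

        for (sd = root; sd; sd = entity->my_sched_data) {
            entity = sd->next_in_service; /* cached by earlier updates */
            sd->in_service_entity = entity;
            entity->service = 0;          /* fresh service accumulator */
        }
        return entity;                    /* the chosen queue's entity */
    }
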
++ */ sd = &bfqd->root_group->sched_data; for (; sd ; sd = entity->my_sched_data) { +- entity = bfq_lookup_next_entity(sd, 1, bfqd); +- BUG_ON(!entity); +#ifdef CONFIG_BFQ_GROUP_IOSCHED + if (entity) { + struct bfq_group *bfqg = @@ -6158,13 +7875,96 @@ index a5ed694..45d63d3 100644 + + bfq_log_bfqg(bfqd, bfqg, + "get_next_queue: lookup in this group"); -+ } else ++ if (!sd->next_in_service) ++ pr_crit("get_next_queue: lookup in this group"); ++ } else { + bfq_log_bfqg(bfqd, bfqd->root_group, + "get_next_queue: lookup in root group"); ++ if (!sd->next_in_service) ++ pr_crit("get_next_queue: lookup in root group"); ++ } +#endif + - entity = bfq_lookup_next_entity(sd, 1, bfqd); ++ BUG_ON(!sd->next_in_service); ++ ++ /* ++ * WARNING. We are about to set the in-service entity ++ * to sd->next_in_service, i.e., to the (cached) value ++ * returned by bfq_lookup_next_entity(sd) the last ++ * time it was invoked, i.e., the last time when the ++ * service order in sd changed as a consequence of the ++ * activation or deactivation of an entity. In this ++ * respect, if we execute bfq_lookup_next_entity(sd) ++ * in this very moment, it may, although with low ++ * probability, yield a different entity than that ++ * pointed to by sd->next_in_service. This rare event ++ * happens in case there was no CLASS_IDLE entity to ++ * serve for sd when bfq_lookup_next_entity(sd) was ++ * invoked for the last time, while there is now one ++ * such entity. ++ * ++ * If the above event happens, then the scheduling of ++ * such entity in CLASS_IDLE is postponed until the ++ * service of the sd->next_in_service entity ++ * finishes. In fact, when the latter is expired, ++ * bfq_lookup_next_entity(sd) gets called again, ++ * exactly to update sd->next_in_service. ++ */ ++ ++ /* Make next_in_service entity become in_service_entity */ ++ entity = sd->next_in_service; ++ sd->in_service_entity = entity; ++ ++ /* ++ * Reset the accumulator of the amount of service that ++ * the entity is about to receive. ++ */ + entity->service = 0; ++ ++ /* ++ * If entity is no longer a candidate for next ++ * service, then we extract it from its active tree, ++ * for the following reason. To further boost the ++ * throughput in some special case, BFQ needs to know ++ * which is the next candidate entity to serve, while ++ * there is already an entity in service. In this ++ * respect, to make it easy to compute/update the next ++ * candidate entity to serve after the current ++ * candidate has been set in service, there is a case ++ * where it is necessary to extract the current ++ * candidate from its service tree. Such a case is ++ * when the entity just set in service cannot be also ++ * a candidate for next service. Details about when ++ * this conditions holds are reported in the comments ++ * on the function bfq_no_longer_next_in_service() ++ * invoked below. ++ */ ++ if (bfq_no_longer_next_in_service(entity)) ++ bfq_active_extract(bfq_entity_service_tree(entity), ++ entity); ++ ++ /* ++ * For the same reason why we may have just extracted ++ * entity from its active tree, we may need to update ++ * next_in_service for the sched_data of entity too, ++ * regardless of whether entity has been extracted. ++ * In fact, even if entity has not been extracted, a ++ * descendant entity may get extracted. Such an event ++ * would cause a change in next_in_service for the ++ * level of the descendant entity, and thus possibly ++ * back to upper levels. 
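
The repair pass this refers to, run once the leaf is known, walks back up and stops at the first level whose cached choice is unaffected; schematically (update_next_in_service() stands in for bfq_update_next_in_service() and returns true only when the cache actually changed):

    #include <stdbool.h>

    struct sched_data;

    struct entity {
        struct entity *parent;            /* NULL at the root */
        struct sched_data *sched_data;
    };

    extern bool update_next_in_service(struct sched_data *sd);

    static void repair_path_upwards(struct entity *leaf)
    {
        struct entity *entity;

        for (entity = leaf; entity; entity = entity->parent)
            if (!update_next_in_service(entity->sched_data))
                break; /* unchanged here, so ancestors are unaffected too */
    }
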
++ * ++ * We cannot perform the resulting needed update ++ * before the end of this loop, because, to know which ++ * is the correct next-to-serve candidate entity for ++ * each level, we need first to find the leaf entity ++ * to set in service. In fact, only after we know ++ * which is the next-to-serve leaf entity, we can ++ * discover whether the parent entity of the leaf ++ * entity becomes the next-to-serve, and so on. ++ */ + ++ /* Log some information */ + bfqq = bfq_entity_to_bfqq(entity); + if (bfqq) + bfq_log_bfqq(bfqd, bfqq, @@ -6181,38 +7981,94 @@ index a5ed694..45d63d3 100644 + } +#endif + - BUG_ON(!entity); - entity->service = 0; } -@@ -1103,8 +1418,9 @@ static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd) + ++ BUG_ON(!entity); + bfqq = bfq_entity_to_bfqq(entity); + BUG_ON(!bfqq); + ++ /* ++ * We can finally update all next-to-serve entities along the ++ * path from the leaf entity just set in service to the root. ++ */ ++ for_each_entity(entity) { ++ struct bfq_sched_data *sd = entity->sched_data; ++ ++ if(!bfq_update_next_in_service(sd, NULL)) ++ break; ++ } ++ + return bfqq; + } + + static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd) + { ++ struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue; ++ struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity; ++ struct bfq_entity *entity = in_serv_entity; ++ + if (bfqd->in_service_bic) { + put_io_context(bfqd->in_service_bic->icq.ioc); bfqd->in_service_bic = NULL; } -+ bfq_clear_bfqq_wait_request(bfqd->in_service_queue); ++ bfq_clear_bfqq_wait_request(in_serv_bfqq); + hrtimer_try_to_cancel(&bfqd->idle_slice_timer); bfqd->in_service_queue = NULL; - del_timer(&bfqd->idle_slice_timer); ++ ++ /* ++ * When this function is called, all in-service entities have ++ * been properly deactivated or requeued, so we can safely ++ * execute the final step: reset in_service_entity along the ++ * path from entity to the root. ++ */ ++ for_each_entity(entity) ++ entity->sched_data->in_service_entity = NULL; ++ ++ /* ++ * in_serv_entity is no longer in service, so, if it is in no ++ * service tree either, then release the service reference to ++ * the queue it represents (taken with bfq_get_entity). 
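
Put together, the teardown around this check is: clear in_service_entity at every level of the path, then drop the scheduler's reference if the leaf is queued nowhere; in outline (on_st and put_queue() are stand-ins for entity->on_st and bfq_put_queue()):

    #include <stdbool.h>

    struct sched_data;
    struct queue;

    struct entity {
        struct entity *parent;
        struct sched_data *sched_data;
        bool on_st;                      /* still on some service tree? */
    };

    struct sched_data { struct entity *in_service_entity; };

    extern void put_queue(struct queue *q); /* releases the reference */

    static void reset_in_service(struct entity *in_serv_entity,
                                 struct queue *in_serv_queue)
    {
        struct entity *entity;

        for (entity = in_serv_entity; entity; entity = entity->parent)
            entity->sched_data->in_service_entity = NULL;

        if (!in_serv_entity->on_st)
            put_queue(in_serv_queue);    /* ref taken by bfq_get_entity() */
    }
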
++ */ ++ if (!in_serv_entity->on_st) ++ bfq_put_queue(in_serv_bfqq); } static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, -@@ -1112,9 +1428,7 @@ static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, +- int requeue) ++ bool ins_into_idle_tree, bool expiration) { struct bfq_entity *entity = &bfqq->entity; - if (bfqq == bfqd->in_service_queue) - __bfq_bfqd_reset_in_service(bfqd); - -+ BUG_ON(bfqq == bfqd->in_service_queue); - bfq_deactivate_entity(entity, requeue); +- bfq_deactivate_entity(entity, requeue); ++ bfq_deactivate_entity(entity, ins_into_idle_tree, expiration); } -@@ -1122,12 +1436,11 @@ static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) + static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) { struct bfq_entity *entity = &bfqq->entity; ++ struct bfq_service_tree *st = bfq_entity_service_tree(entity); ++ ++ BUG_ON(bfqq == bfqd->in_service_queue); ++ BUG_ON(entity->tree != &st->active && entity->tree != &st->idle && ++ entity->on_st); ++ ++ bfq_activate_requeue_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq), ++ false); ++ bfq_clear_bfqq_non_blocking_wait_rq(bfqq); ++} ++ ++static void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) ++{ ++ struct bfq_entity *entity = &bfqq->entity; - bfq_activate_entity(entity); -+ bfq_activate_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq)); -+ bfq_clear_bfqq_non_blocking_wait_rq(bfqq); ++ bfq_activate_requeue_entity(entity, false, ++ bfqq == bfqd->in_service_queue); } -#ifdef CONFIG_BFQ_GROUP_IOSCHED @@ -6221,15 +8077,17 @@ index a5ed694..45d63d3 100644 /* * Called when the bfqq no longer has requests pending, remove it from -@@ -1138,6 +1451,7 @@ static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq, +- * the service tree. ++ * the service tree. As a special case, it can be invoked during an ++ * expiration. + */ + static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq, +- int requeue) ++ bool expiration) { BUG_ON(!bfq_bfqq_busy(bfqq)); BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); -+ BUG_ON(bfqq == bfqd->in_service_queue); - - bfq_log_bfqq(bfqd, bfqq, "del from busy"); - -@@ -1146,27 +1460,20 @@ static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -1146,27 +1928,18 @@ static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq, BUG_ON(bfqd->busy_queues == 0); bfqd->busy_queues--; @@ -6254,16 +8112,15 @@ index a5ed694..45d63d3 100644 -#ifdef CONFIG_BFQ_GROUP_IOSCHED bfqg_stats_update_dequeue(bfqq_group(bfqq)); -#endif -+ -+ BUG_ON(bfqq->entity.budget < 0); - bfq_deactivate_bfqq(bfqd, bfqq, requeue); -+ +- bfq_deactivate_bfqq(bfqd, bfqq, requeue); + BUG_ON(bfqq->entity.budget < 0); ++ ++ bfq_deactivate_bfqq(bfqd, bfqq, true, expiration); } /* -@@ -1184,16 +1491,11 @@ static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq) +@@ -1184,16 +1957,11 @@ static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfq_mark_bfqq_busy(bfqq); bfqd->busy_queues++; @@ -6283,13 +8140,13 @@ index a5ed694..45d63d3 100644 bfqd->wr_busy_queues++; } diff --git a/block/bfq.h b/block/bfq.h -index fcce855..ea1e7d8 100644 +index fcce855..8cd2b6f 100644 --- a/block/bfq.h +++ b/block/bfq.h @@ -1,5 +1,5 @@ /* - * BFQ-v7r11 for 4.5.0: data structures and common functions prototypes. -+ * BFQ-v8r4 for 4.8.0: data structures and common functions prototypes. ++ * BFQ v8r11 for 4.10.0: data structures and common functions prototypes. 
* * Based on ideas and code from CFQ: * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk> @@ -6300,7 +8157,7 @@ index fcce855..ea1e7d8 100644 - * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it> + * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it> + * -+ * Copyright (C) 2016 Paolo Valente <paolo.valente@linaro.org> ++ * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org> */ #ifndef _BFQ_H @@ -6372,7 +8229,7 @@ index fcce855..ea1e7d8 100644 * * The supported ioprio_classes are the same as in CFQ, in descending * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE. -@@ -79,48 +83,29 @@ struct bfq_service_tree { +@@ -79,48 +83,32 @@ struct bfq_service_tree { * All the fields are protected by the queue lock of the containing bfqd. */ struct bfq_sched_data { @@ -6382,6 +8239,9 @@ index fcce855..ea1e7d8 100644 struct bfq_entity *next_in_service; + /* array of service trees, one per ioprio_class */ struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES]; ++ /* last time CLASS_IDLE was served */ ++ unsigned long bfq_class_idle_last_service; ++ }; /** @@ -6430,7 +8290,7 @@ index fcce855..ea1e7d8 100644 * * A bfq_entity is used to represent either a bfq_queue (leaf node in the * cgroup hierarchy) or a bfq_group into the upper level scheduler. Each -@@ -147,27 +132,52 @@ struct bfq_weight_counter { +@@ -147,27 +135,52 @@ struct bfq_weight_counter { * containing bfqd. */ struct bfq_entity { @@ -6439,11 +8299,12 @@ index fcce855..ea1e7d8 100644 + /* pointer to the weight counter associated with this entity */ struct bfq_weight_counter *weight_counter; +- int on_st; + /* -+ * flag, true if the entity is on a tree (either the active or -+ * the idle one of its service_tree). ++ * Flag, true if the entity is on a tree (either the active or ++ * the idle one of its service_tree) or is in service. + */ - int on_st; ++ bool on_st; - u64 finish; - u64 start; @@ -6489,7 +8350,7 @@ index fcce855..ea1e7d8 100644 int prio_changed; }; -@@ -175,56 +185,6 @@ struct bfq_group; +@@ -175,56 +188,6 @@ struct bfq_group; /** * struct bfq_queue - leaf schedulable entity. @@ -6546,7 +8407,7 @@ index fcce855..ea1e7d8 100644 * * A bfq_queue is a leaf request queue; it can be associated with an * io_context or more, if it is async or shared between cooperating -@@ -235,117 +195,174 @@ struct bfq_group; +@@ -235,117 +198,175 @@ struct bfq_group; * All the fields are protected by the queue lock of the containing bfqd. */ struct bfq_queue { @@ -6764,10 +8625,11 @@ index fcce855..ea1e7d8 100644 + unsigned long saved_wr_coeff; + unsigned long saved_last_wr_start_finish; + unsigned long saved_wr_start_at_switch_to_srt; ++ unsigned int saved_wr_cur_max_time; }; enum bfq_device_speed { -@@ -354,224 +371,234 @@ enum bfq_device_speed { +@@ -354,224 +375,232 @@ enum bfq_device_speed { }; /** @@ -7024,10 +8886,9 @@ index fcce855..ea1e7d8 100644 + /* maximum allowed backward seek */ unsigned int bfq_back_max; - unsigned int bfq_slice_idle; +- u64 bfq_class_idle_last_service; + /* maximum idling time */ + u32 bfq_slice_idle; -+ /* last time CLASS_IDLE was served */ - u64 bfq_class_idle_last_service; + /* user-configured max budget value (0 for auto-tuning) */ int bfq_user_max_budget; @@ -7150,7 +9011,7 @@ index fcce855..ea1e7d8 100644 BFQ_BFQQ_FLAG_IO_bound, /* * bfqq has timed-out at least once * having consumed at most 2/10 of -@@ -581,17 +608,12 @@ enum bfqq_state_flags { +@@ -581,17 +610,12 @@ enum bfqq_state_flags { * bfqq activated in a large burst, * see comments to bfq_handle_burst. 
*/ @@ -7169,7 +9030,7 @@ index fcce855..ea1e7d8 100644 }; #define BFQ_BFQQ_FNS(name) \ -@@ -608,25 +630,53 @@ static int bfq_bfqq_##name(const struct bfq_queue *bfqq) \ +@@ -608,28 +632,94 @@ static int bfq_bfqq_##name(const struct bfq_queue *bfqq) \ return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_##name)) != 0; \ } @@ -7194,6 +9055,43 @@ index fcce855..ea1e7d8 100644 /* Logging facilities. */ -#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \ - blk_add_trace_msg((bfqd)->queue, "bfq%d " fmt, (bfqq)->pid, ##args) ++#ifdef CONFIG_BFQ_REDIRECT_TO_CONSOLE ++#ifdef CONFIG_BFQ_GROUP_IOSCHED ++static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); ++static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); ++ ++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \ ++ char __pbuf[128]; \ ++ \ ++ assert_spin_locked((bfqd)->queue->queue_lock); \ ++ blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \ ++ pr_crit("bfq%d%c %s " fmt "\n", \ ++ (bfqq)->pid, \ ++ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ ++ __pbuf, ##args); \ ++} while (0) ++ ++#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \ ++ char __pbuf[128]; \ ++ \ ++ blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \ ++ pr_crit("%s " fmt "\n", __pbuf, ##args); \ ++} while (0) ++ ++#else /* CONFIG_BFQ_GROUP_IOSCHED */ ++ ++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \ ++ pr_crit("bfq%d%c " fmt "\n", (bfqq)->pid, \ ++ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ ++ ##args) ++#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0) ++ ++#endif /* CONFIG_BFQ_GROUP_IOSCHED */ ++ ++#define bfq_log(bfqd, fmt, args...) \ ++ pr_crit("bfq " fmt "\n", ##args) ++ ++#else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */ +#ifdef CONFIG_BFQ_GROUP_IOSCHED +static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); +static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); @@ -7228,7 +9126,11 @@ index fcce855..ea1e7d8 100644 #define bfq_log(bfqd, fmt, args...) \ blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args) -@@ -640,15 +690,12 @@ enum bfqq_expiration { ++#endif /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */ + + /* Expiration reasons. */ + enum bfqq_expiration { +@@ -640,15 +730,12 @@ enum bfqq_expiration { BFQ_BFQQ_BUDGET_TIMEOUT, /* budget took too long to be used */ BFQ_BFQQ_BUDGET_EXHAUSTED, /* budget consumed */ BFQ_BFQQ_NO_MORE_REQUESTS, /* the queue has no more requests */ @@ -7246,7 +9148,7 @@ index fcce855..ea1e7d8 100644 /* number of ios merged */ struct blkg_rwstat merged; /* total time spent on device in ns, may not be accurate w/ queueing */ -@@ -657,12 +704,8 @@ struct bfqg_stats { +@@ -657,12 +744,8 @@ struct bfqg_stats { struct blkg_rwstat wait_time; /* number of IOs queued up */ struct blkg_rwstat queued; @@ -7259,7 +9161,7 @@ index fcce855..ea1e7d8 100644 /* sum of number of ios queued across all samples */ struct blkg_stat avg_queue_size_sum; /* count of samples taken for average */ -@@ -680,8 +723,10 @@ struct bfqg_stats { +@@ -680,8 +763,10 @@ struct bfqg_stats { uint64_t start_idle_time; uint64_t start_empty_time; uint16_t flags; @@ -7270,7 +9172,7 @@ index fcce855..ea1e7d8 100644 /* * struct bfq_group_data - per-blkcg storage for the blkio subsystem. * -@@ -692,7 +737,7 @@ struct bfq_group_data { +@@ -692,7 +777,7 @@ struct bfq_group_data { /* must be the first member */ struct blkcg_policy_data pd; @@ -7279,7 +9181,7 @@ index fcce855..ea1e7d8 100644 }; /** -@@ -712,7 +757,7 @@ struct bfq_group_data { +@@ -712,7 +797,7 @@ struct bfq_group_data { * unused for the root group. 
Used to know whether there * are groups with more than one active @bfq_entity * (see the comments to the function @@ -7288,7 +9190,7 @@ index fcce855..ea1e7d8 100644 * @rq_pos_tree: rbtree sorted by next_request position, used when * determining if two or more queues have interleaving * requests (see bfq_find_close_cooperator()). -@@ -745,7 +790,6 @@ struct bfq_group { +@@ -745,7 +830,6 @@ struct bfq_group { struct rb_root rq_pos_tree; struct bfqg_stats stats; @@ -7296,12 +9198,26 @@ index fcce855..ea1e7d8 100644 }; #else -@@ -767,11 +811,25 @@ bfq_entity_service_tree(struct bfq_entity *entity) +@@ -761,17 +845,38 @@ struct bfq_group { + + static struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity); + ++static unsigned int bfq_class_idx(struct bfq_entity *entity) ++{ ++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); ++ ++ return bfqq ? bfqq->ioprio_class - 1 : ++ BFQ_DEFAULT_GRP_CLASS - 1; ++} ++ + static struct bfq_service_tree * + bfq_entity_service_tree(struct bfq_entity *entity) + { struct bfq_sched_data *sched_data = entity->sched_data; struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); - unsigned int idx = bfqq ? bfqq->ioprio_class - 1 : +- unsigned int idx = bfqq ? bfqq->ioprio_class - 1 : - BFQ_DEFAULT_GRP_CLASS; -+ BFQ_DEFAULT_GRP_CLASS - 1; ++ unsigned int idx = bfq_class_idx(entity); BUG_ON(idx >= BFQ_IOPRIO_CLASSES); BUG_ON(sched_data == NULL); @@ -7323,7 +9239,7 @@ index fcce855..ea1e7d8 100644 return sched_data->service_tree + idx; } -@@ -791,47 +849,6 @@ static struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic) +@@ -791,47 +896,6 @@ static struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic) return bic->icq.q->elevator->elevator_data; } @@ -7371,7 +9287,7 @@ index fcce855..ea1e7d8 100644 #ifdef CONFIG_BFQ_GROUP_IOSCHED static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq) -@@ -857,11 +874,13 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio); +@@ -857,11 +921,13 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio); static void bfq_put_queue(struct bfq_queue *bfqq); static void bfq_dispatch_insert(struct request_queue *q, struct request *rq); static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, @@ -7388,5 +9304,5 @@ index fcce855..ea1e7d8 100644 #endif /* _BFQ_H */ -- -2.7.4 (Apple Git-66) +2.10.0 diff --git a/helpers/DATA/linux-hwe/deblob-4.8 b/helpers/DATA/linux-hwe/deblob-4.10 similarity index 96% rename from helpers/DATA/linux-hwe/deblob-4.8 rename to helpers/DATA/linux-hwe/deblob-4.10 index 97e791ad..e374b545 100644 --- a/helpers/DATA/linux-hwe/deblob-4.8 +++ b/helpers/DATA/linux-hwe/deblob-4.10 @@ -1,6 +1,6 @@ #! /bin/sh -# Copyright (C) 2008-2016 Alexandre Oliva <lxoliva@fsfla.org> +# Copyright (C) 2008-2017 Alexandre Oliva <lxoliva@fsfla.org> # Copyright (C) 2008 Jeff Moe # Copyright (C) 2009 Rubén RodrÃÂguez <ruben@gnu.org> # @@ -48,7 +48,7 @@ # For each kver release, start extra with an empty string, then count # from 1 if changes are needed that require rebuilding the tarball. 
-kver=4.8 extra= +kver=4.10 extra= case $1 in --force) @@ -339,11 +339,31 @@ clean_sed "/^EXTRAVERSION *=/ { s,=$,& ,; s,$,&-gnu$extra,; } grep -q Linux-libre README || clean_sed ' +1 s,^Linux kernel$,GNU Linux-libre, +2 s,^============$,===============, +' README 'renamed to GNU Linux-libre' + +grep -q Linux-libre Documentation/admin-guide/README.rst || +clean_sed ' 1,3 s,Linux kernel release.*kernel\.org.*,GNU Linux-libre <http://linux-libre.fsfla.org>, +2 s,=$,&&, +' Documentation/admin-guide/README.rst 'renamed to GNU Linux-libre' + +grep -q 'release notes for GNU Linux-libre' Documentation/admin-guide/README.rst || +clean_sed ' 2,5 s,Linux version [0-9.]*[0-9],GNU Linux-libre, +' Documentation/admin-guide/README.rst 'dropped partial Linux version' + +grep -q 'Unix kernel' Documentation/admin-guide/README.rst || +clean_sed ' 1,20 s,\(operating system \)\?Unix,Unix kernel, -/WHAT IS LINUX/i\ -WHAT IS GNU Linux-libre?\ +' Documentation/admin-guide/README.rst 'Linux is a kernel' + +grep -q 'What is GNU Linux-libre' Documentation/admin-guide/README.rst || +clean_sed ' +/What is Linux/i\ +What is GNU Linux-libre?\ +------------------------\ \ GNU Linux-libre is a Free version of the kernel Linux (see below),\ suitable for use with the GNU Operating System in 100% Free\ @@ -374,7 +394,7 @@ WHAT IS GNU Linux-libre?\ promotion. See our web page for their images.\ http://linux-libre.fsfla.org/\ -' README 'added blurb about GNU Linux-libre' +' Documentation/admin-guide/README.rst 'added blurb about GNU Linux-libre' # Add reject_firmware and maybe_reject_firmware grep -q _LINUX_LIBRE_FIRMWARE_H include/linux/firmware.h || @@ -471,7 +491,7 @@ maybe_reject_firmware_nowait(struct module *module, int uevent,\ #endif /* _LINUX_LIBRE_FIRMWARE_H */\ ' include/linux/firmware.h 'added non-Free firmware notification support' -grep -q _LINUX_LIBRE_IHEX_FIRMWARE_H include/linux/ihex.h || +grep -q _LINUX_LIBRE_IHEX_H include/linux/ihex.h || clean_sed '$i\ #ifndef _LINUX_LIBRE_IHEX_H\ #define _LINUX_LIBRE_IHEX_H\ @@ -533,6 +553,15 @@ clean_blob drivers/net/ethernet/netx-eth.c clean_kconfig arch/arm/Kconfig ARCH_NETX clean_mk CONFIG_ARCH_NETX arch/arm/Makefile +announce MACH_SUN8I - "Allwinner sun8i Family SoCs support" +clean_blob arch/arm/boot/dts/sun8i-a23-gt90h-v4.dts +clean_blob arch/arm/boot/dts/sun8i-a23-inet86dz.dts +clean_blob arch/arm/boot/dts/sun8i-a23-polaroid-mid2407pxe03.dts +clean_blob arch/arm/boot/dts/sun8i-a23-polaroid-mid2809pxe04.dts +clean_blob arch/arm/boot/dts/sun8i-a33-ga10h-v1.1.dts +clean_kconfig arch/arm/mach-sunxi/Kconfig MACH_SUN8I +clean_mk CONFIG_MACH_SUN8I arch/arm/boot/dts/Makefile + # mips # I couldn't figure out where the firmware name actually comes from. 
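
For readers following the reject_firmware substitutions above and below: the wrapper this script installs in include/linux/firmware.h behaves, in rough outline, like the sketch here (an illustrative reduction only; the authoritative text is the clean_sed payload earlier in this script, which also probes a "/*(DEBLOBBED)*/" placeholder name before failing):

    #include <linux/device.h>
    #include <linux/errno.h>

    struct firmware;

    /* Fail exactly as if the file were permanently absent, after telling
     * the user why: deblobbed drivers keep building, but never load a
     * non-Free blob. */
    static inline int reject_firmware(const struct firmware **fw,
                                      const char *name,
                                      struct device *device)
    {
        dev_err(device,
                "Missing Free firmware (non-Free firmware loading is disabled)\n");
        return -EINVAL;
    }
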
@@ -626,18 +655,14 @@ reject_firmware drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c clean_blob drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c reject_firmware drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c clean_blob drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c -reject_firmware drivers/gpu/drm/amd/amdgpu/iceland_dpm.c -clean_blob drivers/gpu/drm/amd/amdgpu/iceland_dpm.c reject_firmware drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c clean_blob drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c reject_firmware drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c clean_blob drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c -reject_firmware drivers/gpu/drm/amd/amdgpu/fiji_dpm.c -clean_blob drivers/gpu/drm/amd/amdgpu/fiji_dpm.c reject_firmware drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c clean_blob drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c -reject_firmware drivers/gpu/drm/amd/amdgpu/tonga_dpm.c -clean_blob drivers/gpu/drm/amd/amdgpu/tonga_dpm.c +reject_firmware drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +clean_blob drivers/gpu/drm/amd/amdgpu/amdgpu_device.c clean_blob drivers/gpu/drm/amd/amdgpu/vi.c clean_kconfig drivers/gpu/drm/Kconfig DRM_AMDGPU clean_mk CONFIG_DRM_AMDGPU drivers/gpu/drm/amd/amdgpu/Makefile @@ -654,6 +679,16 @@ clean_blob drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c clean_kconfig drivers/gpu/drm/amd/amdgpu/Kconfig DRM_AMDGPU_CIK clean_mk CONFIG_DRM_AMDGPU_CIK drivers/gpu/drm/amd/amdgpu/Makefile +announce DRM_AMDGPU_SI - "Enable amdgpu support for CIK parts" +reject_firmware drivers/gpu/drm/amd/amdgpu/si_dpm.c +clean_blob drivers/gpu/drm/amd/amdgpu/si_dpm.c +reject_firmware drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +clean_blob drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +reject_firmware drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +clean_blob drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +clean_kconfig drivers/gpu/drm/amd/amdgpu/Kconfig DRM_AMDGPU_SI +clean_mk CONFIG_DRM_AMDGPU_SI drivers/gpu/drm/amd/amdgpu/Makefile + announce DRM_AST - "AST server chips" reject_firmware drivers/gpu/drm/ast/ast_dp501.c clean_blob drivers/gpu/drm/ast/ast_dp501.c @@ -668,6 +703,11 @@ clean_blob drivers/gpu/drm/i915/intel_guc_loader.c clean_kconfig drivers/gpu/drm/i915/Kconfig DRM_I915 clean_mk CONFIG_DRM_I915 drivers/gpu/drm/i915/Makefile +announce DRM_I915_GVT - "Enable Intel GVT-g graphics virtualization host support" +reject_firmware drivers/gpu/drm/i915/gvt/firmware.c +clean_kconfig drivers/gpu/drm/i915/Kconfig DRM_I915_GVT +clean_mk CONFIG_DRM_I915_GVT drivers/gpu/drm/i915/Makefile + announce DRM_NOUVEAU - "Nouveau (nVidia) cards" reject_firmware drivers/gpu/drm/nouveau/nvkm/core/firmware.c clean_blob drivers/gpu/drm/nouveau/nvkm/core/firmware.c @@ -691,6 +731,7 @@ clean_mk CONFIG_DRM_MGA drivers/gpu/drm/Makefile announce DRM_MSM - "MSM DRM" reject_firmware drivers/gpu/drm/msm/adreno/adreno_gpu.c +reject_firmware drivers/gpu/drm/msm/adreno/a5xx_power.c clean_blob drivers/gpu/drm/msm/adreno/adreno_device.c clean_kconfig drivers/gpu/drm/msm/Kconfig DRM_MSM clean_mk CONFIG_DRM_MSM drivers/gpu/drm/msm/Makefile @@ -744,6 +785,9 @@ clean_sed ' clean_sed ' /r = r600_init_microcode(rdev);/,/}/ s,return r;,/*(DEBLOBBED)*/, ' drivers/gpu/drm/radeon/evergreen.c 'enable blobless activation' +clean_sed ' +/r = r600_init_microcode(rdev);/,/}/ s,return r;,/*(DEBLOBBED)*/, +' drivers/gpu/drm/radeon/rv770.c 'enable blobless activation' reject_firmware drivers/gpu/drm/radeon/ni.c clean_blob drivers/gpu/drm/radeon/ni.c reject_firmware drivers/gpu/drm/radeon/si.c @@ -780,10 +824,16 @@ clean_blob arch/arm/boot/dts/imx6qdl.dtsi clean_blob arch/arm/boot/dts/imx6sl.dtsi clean_blob arch/arm/boot/dts/imx6sx.dtsi clean_blob 
arch/arm/boot/dts/imx6ul.dtsi +clean_blob arch/arm/boot/dts/imx7s.dtsi clean_blob Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt clean_kconfig drivers/dma/Kconfig IMX_SDMA clean_mk CONFIG_IMX_SDMA drivers/dma/Makefile +announce ST_FDMA - "ST FDMA dmaengine support" +clean_blob drivers/dma/st_fdma.c +clean_kconfig drivers/dma/Kconfig ST_FDMA +clean_mk CONFIG_ST_FDMA drivers/dma/Makefile + ######### # Media # ######### @@ -1541,10 +1591,11 @@ clean_kconfig drivers/net/ethernet/realtek/Kconfig R8169 clean_mk CONFIG_R8169 drivers/net/ethernet/realtek/Makefile announce SLICOSS - "Alacritech Gigabit IS-NIC cards" -reject_firmware drivers/staging/slicoss/slicoss.c -clean_blob drivers/staging/slicoss/slicoss.c -clean_kconfig drivers/staging/slicoss/Kconfig SLICOSS -clean_mk CONFIG_SLICOSS drivers/staging/slicoss/Makefile +reject_firmware drivers/net/ethernet/alacritech/slicoss.c +clean_blob drivers/net/ethernet/alacritech/slic.h +clean_blob drivers/net/ethernet/alacritech/slicoss.c +clean_kconfig drivers/net/ethernet/alacritech/Kconfig SLICOSS +clean_mk CONFIG_SLICOSS drivers/net/ethernet/alacritech/Makefile announce SPIDER_NET - "Spider Gigabit Ethernet driver" reject_firmware drivers/net/ethernet/toshiba/spider_net.c @@ -1739,14 +1790,20 @@ clean_sed ' goto error;\ } }' drivers/net/wireless/broadcom/b43/main.c 'double-check and reject non-Free firmware' -# Major portions of firmware filenames not deblobbed. +clean_sed ' +/^[\t]*filename = "\(ucode\|b0g0\(bs\)\?initvals\)5";$/! { + s,^\([\t]*filename = "\)\(ucode\|pcm\|[^ "]*initvals\)[0-9][^ ."]*";,\1/*(DEBLOBBED)*/";,g +}' drivers/net/wireless/broadcom/b43/main.c 'cleaned up blob basenames' clean_blob drivers/net/wireless/broadcom/b43/main.c clean_kconfig drivers/net/wireless/broadcom/b43/Kconfig B43 clean_mk CONFIG_B43 drivers/net/wireless/broadcom/b43/Makefile announce B43LEGACY - "Broadcom 43xx-legacy wireless support (mac80211 stack)" reject_firmware drivers/net/wireless/broadcom/b43legacy/main.c -# Major portions of firwmare filenames not deblobbed. 
+clean_sed ' +{ + s,^\([\t]*filename = "\)\(ucode\|pcm\|[^ "]*initvals\)[0-9][^ ."]*";,\1/*(DEBLOBBED)*/";,g +}' drivers/net/wireless/broadcom/b43legacy/main.c 'cleaned up blob basenames' clean_blob drivers/net/wireless/broadcom/b43legacy/main.c clean_kconfig drivers/net/wireless/broadcom/b43legacy/Kconfig B43LEGACY clean_mk CONFIG_B43LEGACY drivers/net/wireless/broadcom/b43legacy/Makefile @@ -1888,6 +1945,7 @@ clean_mk CONFIG_MT7601U drivers/net/wireless/mediatek/mt7601u/Makefile announce MWIFIEX - "Marvell WiFi-Ex Driver" clean_blob drivers/net/wireless/marvell/mwifiex/README reject_firmware drivers/net/wireless/marvell/mwifiex/main.c +clean_blob drivers/net/wireless/marvell/mwifiex/main.c clean_kconfig drivers/net/wireless/marvell/mwifiex/Kconfig MWIFIEX clean_mk CONFIG_MWIFIEX drivers/net/wireless/marvell/mwifiex/Makefile @@ -2144,13 +2202,6 @@ clean_blob drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c clean_kconfig drivers/net/wireless/realtek/rtlwifi/Kconfig RTL8723AE clean_mk CONFIG_RTL8723AE drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile -announce R8723AU - "RealTek RTL8723AU Wireless LAN NIC driver" -reject_firmware drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c -clean_blob drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c -clean_blob drivers/staging/rtl8723au/os_dep/os_intfs.c -clean_kconfig drivers/staging/rtl8723au/Kconfig R8723AU -clean_mk CONFIG_R8723AU drivers/staging/rtl8723au/Makefile - announce RTL8723BE - "Realtek RTL8723BE PCIe Wireless Network Adapter" reject_firmware drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c clean_blob drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c @@ -2189,11 +2240,19 @@ clean_mk CONFIG_WL18XX drivers/net/wireless/ti/wl18xx/Makefile announce WLCORE - "TI wlcore support" reject_firmware drivers/net/wireless/ti/wlcore/main.c -clean_blob drivers/net/wireless/ti/wlcore/main.c -clean_blob drivers/net/wireless/ti/wlcore/wlcore_i.h clean_kconfig drivers/net/wireless/ti/wlcore/Kconfig WLCORE clean_mk CONFIG_WLCORE drivers/net/wireless/ti/wlcore/Makefile +announce WLCORE_SDIO - "TI wlcore SDIO support" +clean_blob drivers/net/wireless/ti/wlcore/sdio.c +clean_kconfig drivers/net/wireless/ti/wlcore/Kconfig WLCORE_SDIO +clean_mk CONFIG_WLCORE_SDIO drivers/net/wireless/ti/wlcore/Makefile + +announce WLCORE_SPI - "TI wlcore SPI support" +clean_blob drivers/net/wireless/ti/wlcore/spi.c +clean_kconfig drivers/net/wireless/ti/wlcore/Kconfig WLCORE_SPI +clean_mk CONFIG_WLCORE_SPI drivers/net/wireless/ti/wlcore/Makefile + announce USB_ZD1201 - "USB ZD1201 based Wireless device support" reject_firmware drivers/net/wireless/zydas/zd1201.c clean_blob drivers/net/wireless/zydas/zd1201.c @@ -2260,6 +2319,12 @@ reject_firmware drivers/bluetooth/hci_bcm.c clean_kconfig drivers/bluetooth/Kconfig BT_HCIUART_BCM clean_mk CONFIG_BT_HCIUART_BCM drivers/bluetooth/Makefile +announce BT_HCIUART_MRVL - "Marvell protocol support" +reject_firmware drivers/bluetooth/hci_mrvl.c +clean_blob drivers/bluetooth/hci_mrvl.c +clean_kconfig drivers/bluetooth/Kconfig BT_HCIUART_MRVL +clean_mk CONFIG_BT_HCIUART_MRVL drivers/bluetooth/Makefile + announce BT_HCIBFUSB - "HCI BlueFRITZ! 
USB driver" reject_firmware drivers/bluetooth/bfusb.c clean_blob drivers/bluetooth/bfusb.c @@ -2836,15 +2901,17 @@ clean_blob drivers/misc/lattice-ecp3-config.c clean_kconfig drivers/misc/Kconfig LATTICE_ECP3_CONFIG clean_mk CONFIG_LATTICE_ECP3_CONFIG drivers/misc/Makefile -announce STE_MODEM_RPROC - "STE-Modem remoteproc support" +announce REMOTEPROC - "Support for Remote Processor subsystem" maybe_reject_firmware drivers/remoteproc/remoteproc_core.c -undefine_macro SPROC_MODEM_FIRMWARE "\"/*(DEBLOBBED)*/\"" \ - "disabled non-Free firmware" drivers/remoteproc/ste_modem_rproc.c +clean_kconfig drivers/remoteproc/Kconfig REMOTEPROC +clean_mk CONFIG_REMOTEPROC drivers/remoteproc/Makefile + +announce WKUP_M3_RPROC - "AMx3xx Wakeup M3 remoteproc support" clean_blob Documentation/devicetree/bindings/remoteproc/wkup_m3_rproc.txt clean_blob arch/arm/boot/dts/am33xx.dtsi clean_blob arch/arm/boot/dts/am4372.dtsi -clean_kconfig drivers/remoteproc/Kconfig STE_MODEM_RPROC -clean_mk CONFIG_STE_MODEM_RPROC drivers/remoteproc/Makefile +clean_kconfig drivers/remoteproc/Kconfig WKUP_M3_RPROC +clean_mk CONFIG_WKUP_M3_RPROC drivers/remoteproc/Makefile announce QCOM_Q6V5_PIL - "Qualcomm Hexagon V5 Peripherial Image Loader" reject_firmware drivers/remoteproc/qcom_q6v5_pil.c @@ -3234,6 +3301,35 @@ clean_blob sound/usb/6fire/firmware.c clean_kconfig sound/usb/Kconfig SND_USB_6FIRE clean_mk CONFIG_SND_USB_6FIRE sound/usb/6fire/Makefile +############ +# Watchdog # +############ + +announce ZIIRAVE_WATCHDOG - "Zodiac RAVE Watchdog Timer" +reject_firmware drivers/watchdog/ziirave_wdt.c +clean_blob drivers/watchdog/ziirave_wdt.c +clean_kconfig drivers/watchdog/Kconfig ZIIRAVE_WATCHDOG +clean_mk CONFIG_ZIIRAVE_WATCHDOG drivers/watchdog/Makefile + +########### +# Greybus # +########### + +# I couldn't find any evidence of any Free Software firmware for +# devices that use this bus type, so I'm tentatively disabling it all. +announce GREYBUS_FIRMWARE - "Greybus Firmware Download Class driver" +clean_blob drivers/staging/greybus/firmware.h +reject_firmware drivers/staging/greybus/fw-download.c +clean_blob drivers/staging/greybus/fw-download.c +clean_kconfig drivers/staging/greybus/Kconfig GREYBUS_FIRMWARE +clean_mk CONFIG_GREYBUS_FIRMWARE drivers/staging/greybus/Makefile + +announce GREYBUS_BOOTROM - "Greybus Bootrom Class driver" +reject_firmware drivers/staging/greybus/bootrom.c +clean_blob drivers/staging/greybus/bootrom.c +clean_kconfig drivers/staging/greybus/Kconfig GREYBUS_BOOTROM +clean_mk CONFIG_GREYBUS_BOOTROM drivers/staging/greybus/Makefile + ####### # SOC # ####### @@ -3258,7 +3354,8 @@ clean_mk CONFIG_KEYSTONE_NAVIGATOR_QMSS drivers/soc/ti/Makefile announce Documentation - "non-Free firmware scripts and documentation" clean_blob Documentation/media/dvb-drivers/avermedia.rst clean_blob Documentation/media/dvb-drivers/opera-firmware.rst -clean_blob Documentation/sound/alsa/ALSA-Configuration.txt +clean_blob Documentation/media/v4l-drivers/ivtv.rst +clean_blob Documentation/sound/alsa-configuration.rst clean_blob Documentation/sound/oss/MultiSound clean_blob Documentation/sound/oss/PSS clean_blob Documentation/sound/oss/PSS-updates diff --git a/helpers/DATA/linux-hwe/deblob-check b/helpers/DATA/linux-hwe/deblob-check index a4f2f168..f95b9722 100644 --- a/helpers/DATA/linux-hwe/deblob-check +++ b/helpers/DATA/linux-hwe/deblob-check @@ -1,6 +1,6 @@ #! /bin/sh -# deblob-check version 2016-09-25 + 2017-01-09's r13475 +# deblob-check version 2017-02-06 # Inspired in gNewSense's find-firmware script. 
# Written by Alexandre Oliva <lxoliva@fsfla.org> @@ -47,27 +47,38 @@ # --reverse-patch: Test the removed parts of a patch, rather than # the added ones. -# --use-awk: Choose the internal GNU awk script for the bulk of the -# work. This is the default option, if GNU awk is found. -# The awk interpreter is named gawk, unless AWK is set. - -# --use-sed: Choose the internal GNU sed script for the bulk of the -# work. This is the default option, if GNU awk is not -# found. +# --use-python: Choose the internal python script for the bulk of +# the work. This is the fastest for cleaning up, +# because of the fast startup time of the regular +# expression engine. This option is the default if +# python is found. Set PYTHON to override the python +# interpreter. The internal script works in both Python +# 2 and 3. -# --use-python: Choose the internal python script. This is not -# recommended, because the regular expressions we use -# invoke exponential behavior in the python engine. +# --use-awk: Choose the internal GNU awk script for the bulk of the +# work. This is the recommended option to check entire +# tarballs, because its regular expression engine offers +# the best speed/memory use. This is the default option +# if python is not found. Set AWK to specify GNU awk's +# name. # --use-perl: Choose the internal perl script. This is not # recommended, because our regular expressions exceed -# some limits hard-coded into perl. +# some limits hard-coded into perl. Set PERL to specify +# which perl implementation to use. This is the default +# option if neither python nor GNU awk are found, AND if +# PERL is set. + +# --use-sed: Choose the internal GNU sed script for the bulk of the +# work. This is the default option, if no other +# alternative is found. Use SED to specify which sed +# program to use. # --save-script-input: Save the input that would have been fed to # any of the engines above. # --gen-flex: Generate a flex input file with all known blob and -# false positive patterns. It would have been a fast +# false positive patterns. It might have been a fast # regular expression processor if only the flex program # completed in reasonable time. @@ -893,7 +904,7 @@ set_except () { blobna 'DEFAULT_FIRMWARE' blobna '\([.]\|->\)firmware[ \n]*=[^=]' blobna 'mod_firmware_load' # sound/ - blobname '[.]\(fw\|bin[0-9]*\|hex\|frm\|co[dx]\|dat\|elf\|xlx\|rfb\|ucode\|img\|sbcf\|ctx\(prog\|vals\)\|z77\|wfw\|inp\|dlmem\|cld\)[\\]\?["]' + blobname '[.]\(\(fw\|bin\)[0-9]*\|hex\|frm\|co[dx]\|dat\|elf\|xlx\|rfb\|ucode\|img\|sbcf\|ctx\(prog\|vals\)\|z77\|wfw\|inp\|dlmem\|cld\|tftf\)[\\]\?["]' # Catch misdeblobbed fw extension. 
blobname '["][^" \t\n]*[/][*][(]DEBLOBBED[)][*][/][^"\\]' # Ideally we'd whitelist URLs that don't recommend non-Free @@ -3937,7 +3948,7 @@ set_except () { accept '[\t]ret[ ]=[ ]p54spi_request_firmware[(]' drivers/net/wireless/p54/p54spi.c accept 'static[ ]int[ ]rt2x00lib_request_firmware[(]' drivers/net/wireless/rt2x00/rt2x00firwmare.c accept '[\t][\t]retval[ ]=[ ]rt2x00lib_request_firmware[(]' drivers/net/wireless/rt2x00/rt2x00firmware.c - accept '[\t][\t]wl1271_error[(]["]request_firmware_nowait[ ]failed' drivers/net/wireless/ti/wlcore/main.c + accept '[\t][\t]*wl1271_error[(]["]request_firmware_nowait[ ]failed' drivers/net/wireless/ti/wlcore/main.c accept '[\t][\t]nfc_err[(][&]drv->pdev->dev[,][ ]["]request_firmware[ ]failed' drivers/nfc/nfcwilink.c accept '[\t][\t][\t]["]request_firmware[ ]returned' drivers/nfc/nfcwilink.c accept '[\t][\t]dev_err[(][&]rproc->dev[,][ ]["]request_firmware_nowait[ ]err' drivers/remoteproc/remoteproc_core.c @@ -4435,7 +4446,7 @@ set_except () { blobname 'pre-cal-%s-%s\.bin' drivers/net/wireless/ath/ath10k/core.c accept '[\t]fw_file->firmware[ ]=[ ]ath10k_fetch_fw_file' drivers/net/wireless/ath/ath10k/core.c blobname 'brcmfmac4356-sdio\.bin' drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c - blobname 'iwlwifi-9000\(-pu-a0-lc-a0-\|-\|\)' drivers/net/wireless/intel/iwlwifi/iwl-9000.c + blobname 'iwlwifi-9000\(-pu-a0-\(jf\|lc\)-a0-\|-\|\)' drivers/net/wireless/intel/iwlwifi/iwl-9000.c blobname 'iwlwifi-9260-th-a0-\(jf\|lc\)-a0-' drivers/net/wireless/intel/iwlwifi/iwl-9000.c blobname 'mrvl[/]pcie8897_uapsta_a0\.bin' drivers/net/wireless/marvell/mwifiex/pcie.h blobname 'mrvl[/]pcieuart8997_combo\(_v2\)\?\.bin' drivers/net/wireless/marvell/mwifiex/pcie.h @@ -4521,9 +4532,133 @@ set_except () { blobname 'modem\.mdt' drivers/remoteproc/qcom_q6v5_pil.c blobname 'mba\.b00' drivers/remoteproc/qcom_q6v5_pil.c + # New in 4.9. 
+ blobname 'rtl_bt[/]rtl\(8723b\|8821a\|8761a\|8822b\)_\(config\|fw\)\.bin' drivers/bluetooth/btrtl.c + blobname 'amdgpu[/]\(topaz\|tonga\)_k_smc\.bin' drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c + blobname 'i915[/]["][^"]*["]_guc_ver["][^"]*["]_["][^"]*["]\.bin' drivers/gpu/drm/i915/intel_guc_loader.c + blobname 'mrvl[/]pcie\(uart\|usb\)8997_combo_v4\.bin' drivers/net/wireless/marvell/mwifiex/pcie.h + blobname 'mrvl[/]pcie8997_wlan_v4\.bin' drivers/net/wireless/marvell/mwifiex/pcie.h + blobname 'mrvl[/]usbusb8997_combo_v4\.bin' drivers/net/wireless/marvell/mwifiex/usb.h + accept '[ ]*[/]Widths[ ]\[[0-9 ]*\]' Documentation/media/media_api_files/typical_media_device.pdf + accept '[ ]*:widths:[0-9 ]*' Documentation/media/uapi/v4l/subdev-formats.rst + defsc 'static[ ]const[ ]struct[ ]iceland_pt_defaults[ ]defaults_iceland\(xt\|pro\)\?[ ]=' drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c + defsc 'static[ ]const[ ]struct[ ]tonga_pt_defaults[ ]tonga_power_tune_data_set_array\[POWERTUNE_DEFAULT_SET_MAX\][ ]=' drivers/gpu/drm/amd/poewrplay/smumgr/tonga_smc.c + defsnc 'static[ ]const[ ]uint32_t[ ]tonga_clock_stretcher_ddt_table\[2\]\[4\]\[4\][ ]=' drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c + defsnc 'static[ ]const[ ]u16[ ]\(forward\|inverse\)_quantization_table\[QUANTIZATION_TABLE_LEN\][ ]=' drivers/media/pci/tw5864/tw5864-video.c + defsnc 'static[ ]const[ ]u16[ ]encoder_vlc_lookup_table\[VLC_LOOKUP_TABLE_LEN\][ ]=' drivers/media/pci/tw5864/tw5864-video.c + defsnc 'static[ ]const[ ]unsigned[ ]int[ ]\(lambda_lookup_table\|intra4x4_lambda3\)\[\][ ]=' drivers/media/pci/tw5864/tw5864-video.c + defsnc 'static[ ]const[ ]struct[ ]iro[ ]iro_arr\[47\][ ]=' drivers/net/ethernet/qlogic/qed/qed_hsi.h + defsnc 'static[ ]const[ ]u8[ ]netvsc_hash_key\[\][ ]=' drivers/net/hyperv/rndis_filter.c + defsc 'static[ ]const[ ]struct[ ]cs42l73_mclk_div[ ]cs42l73_mclk_coeffs\[\][ ]=' sound/soc/codecs/cs42l73.c + defsnc 'static[ ]const[ ]struct[ ]reg_default[ ]rt5660_reg\[\][ ]=' sound/soc/codecs/rt5660.c + defsnc 'static[ ]const[ ]struct[ ]reg_default[ ]rt566[38]_reg\[\][ ]=' sound/soc/codecs/rt5663.c + defsnc '__thread[ ]vector[ ]int[ ]varray\[24\][ ]=' tools/testing/selftests/powerpc/math/vsx_preempt.c + defsnc 'vector[ ]int[ ]vms\[\][ ]=' tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vmx.c + defsnc 'vector[ ]int[ ]vss\[\][ ]=' tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vsx.c + blobname 'imx[/]sdma[/]sdma-imx7d\.bin' arch/arm/boot/dts/imx7s.dtsi + blobname 'gsl3675-gt90h\.fw' arch/arm/boot/dts/sun8i-a23-gt90h-v4.dts + blobname 'gsl1680-inet86dz\.fw' arch/arm/boot/dts/sun8i-a23-inet86dz.dts + blobname 'gsl1680-polaroid-mid2407pxe03\.fw' arch/arm/boot/dts/sun8i-a23-polaroid-mid2407pxe03.dts + blobname 'gsl3670-polaroid-mid2809pxe04\.fw' arch/arm/boot/dts/sun8i-a23-polaroid-mid2809pxe04.dts + blobname 'gsl3675-ga10h\.fw' arch/arm/boot/dts/sun8i-a33-ga10h-v1.1.dts + blobname 'mrvl[/]helper_uart_3000000\.bin' drivers/bluetooth/hci_mrvl.c + blobname 'mrvl[/]uart8897_bt\.bin' drivers/bluetooth/hci_mrvl.c + accept 'static[ ]int[ ]bnxt_flash_\(firmware\|package\)_from_file[(][ \t\na-z0-9_,*]*[)][\n][{][\n][\t]\([^\n]*[\n]\+[\t]\)*rc[ ]=[ ]request_firmware[(][&]fw' drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c + accept '[\t]*\(rc[ ]=[ ]\)\?wil_request_firmware[(]wil[,][ ]WIL_FW2\?_NAME[,][ ]\(true\|false\)[)][;]' drivers/net/wireless/ath/wil6210/main.c + blobname 'mwifiex_mfg\.bin' drivers/net/wireless/marvell/mwifiex/main.c + accept '[\t]*mwifiex_dbg[(]adapter[,][ ]ERROR[,][\n][\t 
]*["]request_firmware[ ]error' drivers/net/wireless/marvell/mwifiex/main.c + blobname 'ti-connectivity[/]wl12[78]x-nvs\.bin' 'drivers/net/wireless/ti/wlcore/\(sdio\|spi\)\.c' + blobname 'ti-connectivity[/]wl18xx-conf\.bin' 'drivers/net/wireless/ti/wlcore/\(sdio\|spi\)\.c' + blobname 'ziirave_wdt\.fw' drivers/watchdog/wiirave_wdt.c + blobna '["]gmp_["]' drivers/staging/greybus/firmware.h + blobna '["]FW_NAME_PREFIX["]["]%08x_%08x_%08x_%08x_%s\.tftf["][\n][^*]*\([*]\+[^/*][^*]*\)*[*]\+[/]' drivers/staging/greybus/firmware.h + blobname '\(gmp_\)\?%08x_%08x_%08x_%08x_s2l\.tftf' drivers/staging/greybus/bootrom.c + blobname '\(gmp_\)\?%08x_%08x_%08x_%08x_\(%s\|[^"]*\)\.tftf' drivers/staging/greybus/fw-download.c + # Long-needed b43 cleanup. These are actually cleaned up with + # custom code in deblob-<kver>. Only ucode5, b0g0initvals5 and + # b0g0bsinitvals5 are provided by openfwwf, and only b43 (not + # b43legacy) can use the openfwwf files, so anything else in b43 + # and b43legacy ought to be cleaned up. + accept '[\t]*filename[ ]=[ ]["]\(ucode\|b0g0\(bs\)\?initvals\)5["][;]' drivers/net/wireless/broadcom/b43.c + blobna '["]\(ucode\|[^ "\n]*initvals\)[0-9][^" .\n]*["]' 'drivers/net/wireless/broadcom/b43\(legacy\)\?\.c' + # We want to be more specific than deblob0-<kver> in the pcm + # matches, to avoid numerous false positives in sound pcm drivers. + blobna '["]pcm[45]["]' 'drivers/net/wireless/broadcom/b43\(legacy\)\?\.c' + blobna '[/][*][ ]What[ ]about[ ][^ \n]*initvals22[?][ ][*][/]' drivers/net/wireless/broadcom/b43.c + # New in 4.9.2 and 4.8.17. accept '[\t]*fwname[ ]=[ ]["]fuc4\(09\|1a\)[cd]["][;]' drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c accept '[\t]*snprintf[(]f[,][ ]sizeof[(]f[)][,][ ]["]nouveau[/]nv%02x_%s["][,][ ]device->chipset[,][ ]fwname[)][;][\n][\t]*ret[ ]=[ ]request_firmware[(][&]fw[,][ ]f[,][ ]device->dev[)][;][\n][\t]*if[ ][(]ret[)][ ][{][\n][\t]*snprintf[(]f[,][ ]sizeof[(]f[)][,][ ]["]nouveau[/]%s["][,][ ]fwname[)][;][\n][\t]*ret[ ]=[ ]request_firmware[(][&]fw[,][ ]f[,][ ]device->dev[)][;]' drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c + + # New in 4.10-rc*. 
+ accept '[ ]*d=["]m[ ]0[,]0[ ][^z\n]*z["]' Documentation/media/uapi/v4l/crop.svg + accept '[ ]*Kernel[ ]driver[ ]in[ ]use:[ ]serial\([\n][ ]*[0-3]0:[ 0-9a-f]*\)*' Documentation/media/v4l-drivers/ivtv.rst + accept 'tbl_shf_table:[\n]\([/][/][^\n]*[\n]\)*[\n]*\([\n][\t]\.byte[ \t0-9xa-f]*\)*' arch/arm/crypto/crct10dif-ce-core.S + accept '\.LK256:\([\n][ ]*\.long[ ]*0\(x[0-9a-f]*\([,]0x[0-9a-f]*\)*\)\?\)*[ \t]*[/][/][ ]*terminator' arch/arm64/crypto/sha256-core.S_shipped + accept '\.LK[$]BITS:\([\n]___[\n][$]code\.=<<___[ ]if[^\n]*\([\n][ ]*\.\(long\|quad\)[ ]*0\(x[0-9a-f]*\([,]0x[0-9a-f]*\)*\)\?\)*[ \t]*[/][/][ ]*terminator\)*' arch/arm64/crypto/sha512-armv8.pl + accept '\.LK512:\([\n][ ]*\.quad[ ]*0\(x[0-9a-f]*\([,]0x[0-9a-f]*\)*\)\?\)*[ \t]*[/][/][ ]*terminator' arch/arm64/crypto/sha512-core.S_shipped + accept '[ ][*][ ]Concurrent[ ]request_firmware[(][)][ ]for[ ]the[ ]same' drivers/base/firmware_class.c + defsnc 'static[ ]const[ ]struct[ ]rcar_gen2_cpg_pll_config[ ]cpg_pll_configs\[8\][ ]__initconst[ ]=' drivers/clk/renesas/r8a7745-cpg-mssr.c + defsnc 'static[ ]unsigned[ ]int[ ]eotf_33_linear_mapping\[OSD_EOTF_LUT_SIZE\][ ]=' drivers/gpu/drm/meson/meson_viu.c + defsnc 'static[ ]unsigned[ ]int[ ]oetf_41_linear_mapping\[OSD_OETF_LUT_SIZE\][ ]=' drivers/gpu/drm/meson/meson_viu.c + defsnc 'static[ ]unsigned[ ]int[ ]vpp_filter_coefs_4point_bspline\[\][ ]=' drivers/gpu/drm/meson/meson_vpp.c + defsnc 'static[ ]const[ ]u32[ ]a5xx_registers\[\][ ]=' drivers/gpu/drm/msm/adreno/a5xx_gpu.c + defsnc '[}][ ]a5xx_sequence_regs\[\][ ]=' drivers/gpu/drm/msm/adreno/a5dxx_power.c + defsnc 'static[ ]uint32_t[ ]gf100_ce_\(data\|code\)\[\][ ]=' drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h + defsnc 'static[ ]uint32_t[ ]gt215_ce_\(data\|code\)\[\][ ]=' drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf215.fuc3.h + defsnc 'static[ ]uint32_t[ ]gf100_grgpc_\(data\|code\)\[\][ ]=' drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h + defsnc 'static[ ]uint32_t[ ]gf117_grgpc_\(data\|code\)\[\][ ]=' drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h + defsnc 'static[ ]uint32_t[ ]gk104_grgpc_\(data\|code\)\[\][ ]=' drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h + defsnc 'static[ ]uint32_t[ ]gk110_grgpc_\(data\|code\)\[\][ ]=' drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h + defsnc 'static[ ]uint32_t[ ]gk208_grgpc_\(data\|code\)\[\][ ]=' drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h + defsnc 'static[ ]uint32_t[ ]gm107_grgpc_code\[\][ ]=' drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcgm107.fuc5.h + defsnc 'static[ ]uint32_t[ ]gf100_grhub_\(data\|code\)\[\][ ]=' drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h + defsnc 'static[ ]uint32_t[ ]gf117_grhub_\(data\|code\)\[\][ ]=' drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h + defsnc 'static[ ]uint32_t[ ]gk104_grhub_\(data\|code\)\[\][ ]=' drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h + defsnc 'static[ ]uint32_t[ ]gk110_grhub_\(data\|code\)\[\][ ]=' drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h + defsnc 'static[ ]uint32_t[ ]gk208_grhub_\(data\|code\)\[\][ ]=' drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h + defsnc 'static[ ]uint32_t[ ]gm107_grhub_\(data\|code\)\[\][ ]=' drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h + defsnc 'static[ ]uint32_t[ ]g98_psec_\(data\|code\)\[\][ ]=' drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h + defsnc 'static[ ]uint32_t[ ]gf100_pmu_\(data\|code\)\[\][ ]=' 
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h + defsnc 'static[ ]uint32_t[ ]gf119_pmu_\(data\|code\)\[\][ ]=' drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h + defsnc 'static[ ]uint32_t[ ]gk208_pmu_\(data\|code\)\[\][ ]=' drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h + defsnc 'static[ ]uint32_t[ ]gt215_pmu_\(data\|code\)\[\][ ]=' drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h + defsnc 'static[ ]uint32_t[ ]g98_sec_\(data\|code\)\[\][ ]=' drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h + defsnc 'static[ ]const[ ]u16[ ]lmp91000_temp_lut\[\][ ]=' drivers/iio/potentiostat/lmp91000.c + defsnc 'static[ ]const[ ]u8[ ]fdp1_mdet\[\][ ]=' drivers/media/platform/rcar_fdp1.c + defsnc 'static[ ]struct[ ]cs35l34_mclk_div[ ]cs35l34_mclk_coeffs\[\][ ]=' sound/soc/codecs/cs35l34.c + defsnc 'static[ ]const[ ]struct[ ]cs42l42_pll_params[ ]pll_ratio_table\[\][ ]=' sound/soc/codecs/cs42l42.c + defsnc 'static[ ]const[ ]struct[ ]reg_default[ ]rt5663_v2_reg\[\][ ]=' sound/soc/codecs/rt5663.c + defsnc 'static[ ]const[ ]struct[ ]reg_default[ ]rt5665_reg\[\][ ]=' sound/soc/codecs/rt5665.c + defsnc 'static[ ]const[ ]struct[ ]reg_default[ ]stac9766_reg_defaults\[\][ ]=' sound/soc/codecs/stac9766.c + defsnc 'static[ ]const[ ]struct[ ]reg_default[ ]wm9705_reg_defaults\[\][ ]=' sound/soc/codecs/wm9705.c + defsnc 'static[ ]const[ ]struct[ ]reg_default[ ]wm9712_reg_defaults\[\][ ]=' sound/soc/codecs/wm9712.c + blobname 'fdma_\(%s_%d\|[^\." ,;_]*_[0-9][0-9]*\)\.elf' drivers/dma/st_fdma.c + blobname 'amdgpu[/]polaris12_smc\.bin' drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c + blobname 'amdgpu[/]polaris12_uvd\.bin' drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c + blobname 'amdgpu[/]polaris12_vce\.bin' drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c + blobname 'amdgpu[/]polaris12_\(ce\|pfp\|me\|mec\|mec2\|rlc\)\.bin' drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c + blobname 'amdgpu[/]polaris12_mc\.bin' drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c + blobname 'amdgpu[/]polaris12_sdma1\?\.bin' drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c + blobname 'amdgpu[/]polaris12_smc\.bin' drivers/gpu/drm/amd/amdgpu/vi.c + blobname 'radeon[/]si58_mc\.bin' 'drivers/gpu/drm/amd/amdgpu/gmc_v6_0\.c\|drivers/gpu/drm/radeon/si\.c' + blobname 'a530_p\(m4\|fp\)\.fw' drivers/gpu/drm/msm/adreno/adreno_device.c + blobname 'radeon[/]banks_k_2_smc\.bin' 'drivers/gpu/drm/amd/amdgpu/si_dpm\.c\|drivers/gpu/drm/radeon/si\.c' + blobname 'melfas_mip4_%04X\.fw' drivers/input/touchscreen/melfas_mip4.c + blobname 'cbfw-3\.2\.5\.1\.bin' drivers/scsi/bfa/bfad.c + blobname 'r8a779x_usb3_v3\.dlmem' drivers/usb/host/xhci-rcar.h + blob 'https\?:[/][/]linuxtv\.org[/][^" >]*firmware[/][^" \t\n>\\)]*' 'Documentation/media/v4l-drivers/ivtv\.rst\|drivers/media/pci/ttpci/av7110\.c\|firmware/WHENCE' + # The firmware file name is supplied by the user. + accept '[\t]ret[ ]=[ ]request_firmware[(][&]fw[,][ ]fw_name[,]\([^\n]*[\n]\+[^\n}]\)*ret[ ]=[ ]rmi_firmware_update[(]data[,][ ]fw[)]' drivers/input/rmi4/rmi_f34.c + # This seems to be an example file name of something to be + # supplied by the user in the DTS file, not the name of an actual + # firmware file.
+ accept '[\t ]*firmware-name[ ]=[ ]["]zynq-gpio\.bin["][;]' Documentation/devicetree/bindings/fpga/fpga-region.txt + accept '[\t]if[ ][(]of_property_read_bool[(]np[,][ ]["]qca[,]no-eeprom["][)][)][ ][{][\n][\t]*[/][*][ ]ath9k-eeprom-<bus>-<id>\.bin[ ][*][/][\n][\t]*scnprintf[(]eeprom_name[,][ ]sizeof[(]eeprom_name[)][,][\n][\t ]*["]ath9k-eeprom-%s-%s\.bin["][,]' drivers/net/wireless/ath/ath9k/init.c + blobname 'iwlwifi-3168-' drivers/net/wireless/intel/iwlwifi/iwl-7000.c + blobname 'iwlwifi-8265-' drivers/net/wireless/intel/iwlwifi/iwl-8000.c + blobname 'iwlwifi-[0-9][^"\n\t ]*-' drivers/net/wireless/intel/iwlwifi/iwl-8000.c + blobname 'a530v3_gpmu\.fw2' drivers/gpu/drm/msm/adreno/adreno_device.c ;; */*freedo*.patch | */*logo*.patch) diff --git a/helpers/DATA/linux-hwe/deblob-main b/helpers/DATA/linux-hwe/deblob-main index 8f24b6e4..80c8ba91 100644 --- a/helpers/DATA/linux-hwe/deblob-main +++ b/helpers/DATA/linux-hwe/deblob-main @@ -1,6 +1,6 @@ #! /bin/sh -# Copyright (C) 2008-2016 Alexandre Oliva <lxoliva@fsfla.org> +# Copyright (C) 2008-2017 Alexandre Oliva <lxoliva@fsfla.org> # This program is part of GNU Linux-libre, a GNU project that # publishes scripts to clean up Linux so as to make it suitable for @@ -112,7 +112,7 @@ else fi x1="kver=$mver extra=$extra" -x2=`grep "^kver=[^ ]* extra=" $deblob` +x2=`grep "^kver=[^ ]* extra=" $deblob | sed 's, *#.*,,'` if test "$x1" = "$x2"; then : else diff --git a/helpers/DATA/linux-hwe/silent-accept-firmware.patch b/helpers/DATA/linux-hwe/silent-accept-firmware.patch index ee67f873..20bab538 100644 --- a/helpers/DATA/linux-hwe/silent-accept-firmware.patch +++ b/helpers/DATA/linux-hwe/silent-accept-firmware.patch @@ -1,8 +1,8 @@ -diff -ru source.bak/drivers/base/firmware_class.c source/drivers/base/firmware_class.c ---- source.bak/drivers/base/firmware_class.c 2017-05-31 17:29:09.000000000 -0400 -+++ source/drivers/base/firmware_class.c 2017-05-31 17:32:41.346386778 -0400 -@@ -97,7 +97,7 @@ - FW_STATUS_ABORT, +diff -ru source/drivers/base/firmware_class.c source/drivers/base/firmware_class.c +--- source/drivers/base/firmware_class.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/base/firmware_class.c 2017-08-21 10:54:54.485544208 -0400 +@@ -99,7 +99,7 @@ + FW_STATUS_ABORTED, }; -static int loading_timeout = 60; /* In seconds */ @@ -10,7 +10,7 @@ diff -ru source.bak/drivers/base/firmware_class.c source/drivers/base/firmware_c static inline long firmware_loading_timeout(void) { -@@ -351,14 +351,14 @@ +@@ -419,14 +419,14 @@ id); if (rc) { if (rc == -ENOENT) @@ -28,9 +28,9 @@ diff -ru source.bak/drivers/base/firmware_class.c source/drivers/base/firmware_c - dev_dbg(device, "direct-loading %s\n", buf->fw_id); + dev_dbg(device, "direct-loading\n", buf->fw_id); buf->size = size; - fw_finish_direct_load(device, buf); + fw_state_done(&buf->fw_st); break; -@@ -949,7 +949,7 @@ +@@ -1010,7 +1010,7 @@ if (opt_flags & FW_OPT_UEVENT) { buf->need_uevent = true; dev_set_uevent_suppress(f_dev, false); @@ -39,7 +39,7 @@ diff -ru source.bak/drivers/base/firmware_class.c source/drivers/base/firmware_c kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD); } else { timeout = MAX_JIFFY_OFFSET; -@@ -1065,7 +1065,7 @@ +@@ -1099,7 +1099,7 @@ } if (fw_get_builtin_firmware(firmware, name, dbuf, size)) { @@ -48,7 +48,7 @@ diff -ru source.bak/drivers/base/firmware_class.c source/drivers/base/firmware_c return 0; /* assigned */ } -@@ -1152,11 +1152,11 @@ +@@ -1186,11 +1186,11 @@ goto out; ret = 0; @@ -62,7 +62,7 @@ diff -ru source.bak/drivers/base/firmware_class.c 
source/drivers/base/firmware_c name); ret = -EBUSY; goto out; -@@ -1164,7 +1164,7 @@ +@@ -1198,7 +1198,7 @@ } else { ret = usermodehelper_read_trylock(); if (WARN_ON(ret)) { @@ -71,7 +71,7 @@ diff -ru source.bak/drivers/base/firmware_class.c source/drivers/base/firmware_c name); goto out; } -@@ -1174,12 +1174,13 @@ +@@ -1208,12 +1208,13 @@ if (ret) { if (!(opt_flags & FW_OPT_NO_WARN)) dev_warn(device, @@ -87,10 +87,12 @@ diff -ru source.bak/drivers/base/firmware_class.c source/drivers/base/firmware_c } } -diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c source/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c ---- source.bak/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c 2017-05-31 17:32:41.354386778 -0400 -@@ -786,7 +786,7 @@ +Only in source/drivers/base: firmware_class.c.orig +Only in source/drivers/base: firmware_class.c.rej +diff -ru source/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c source/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +--- source/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c 2017-08-21 10:54:54.485544208 -0400 +@@ -856,7 +856,7 @@ err = amdgpu_ucode_validate(adev->pm.fw); if (err) { @@ -99,10 +101,11 @@ diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c source/drivers/gpu/d release_firmware(adev->pm.fw); adev->pm.fw = NULL; return err; -diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c source/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c ---- source.bak/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 2017-05-31 17:32:41.354386778 -0400 -@@ -155,7 +155,7 @@ +Only in source/drivers/gpu/drm/amd/amdgpu: amdgpu_cgs.c.rej +diff -ru source/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c source/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +--- source/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 2017-08-21 10:54:54.485544208 -0400 +@@ -160,7 +160,7 @@ r = request_firmware(&adev->uvd.fw, fw_name, adev->dev); if (r) { @@ -111,10 +114,11 @@ diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c source/drivers/gpu/d fw_name); return r; } -diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c source/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c ---- source.bak/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 2017-05-31 17:32:41.358386778 -0400 -@@ -128,7 +128,7 @@ +Only in source/drivers/gpu/drm/amd/amdgpu: amdgpu_uvd.c.orig +diff -ru source/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c source/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +--- source/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 2017-08-21 10:54:54.485544208 -0400 +@@ -133,7 +133,7 @@ r = request_firmware(&adev->vce.fw, fw_name, adev->dev); if (r) { @@ -123,10 +127,11 @@ diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c source/drivers/gpu/d fw_name); return r; } -diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/ci_dpm.c source/drivers/gpu/drm/amd/amdgpu/ci_dpm.c ---- source.bak/drivers/gpu/drm/amd/amdgpu/ci_dpm.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/gpu/drm/amd/amdgpu/ci_dpm.c 2017-05-31 17:32:41.358386778 -0400 -@@ -5792,7 +5792,7 @@ +Only in source/drivers/gpu/drm/amd/amdgpu: amdgpu_vce.c.orig +diff -ru 
source/drivers/gpu/drm/amd/amdgpu/ci_dpm.c source/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +--- source/drivers/gpu/drm/amd/amdgpu/ci_dpm.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/gpu/drm/amd/amdgpu/ci_dpm.c 2017-08-21 10:54:54.485544208 -0400 +@@ -5806,7 +5806,7 @@ out: if (err) { printk(KERN_ERR @@ -135,9 +140,10 @@ diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/ci_dpm.c source/drivers/gpu/drm/a fw_name); release_firmware(adev->pm.fw); adev->pm.fw = NULL; -diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/cik_sdma.c source/drivers/gpu/drm/amd/amdgpu/cik_sdma.c ---- source.bak/drivers/gpu/drm/amd/amdgpu/cik_sdma.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/gpu/drm/amd/amdgpu/cik_sdma.c 2017-05-31 17:32:41.358386778 -0400 +Only in source/drivers/gpu/drm/amd/amdgpu: ci_dpm.c.orig +diff -ru source/drivers/gpu/drm/amd/amdgpu/cik_sdma.c source/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +--- source/drivers/gpu/drm/amd/amdgpu/cik_sdma.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/gpu/drm/amd/amdgpu/cik_sdma.c 2017-08-21 10:54:54.485544208 -0400 @@ -143,7 +143,7 @@ out: if (err) { @@ -147,21 +153,9 @@ diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/cik_sdma.c source/drivers/gpu/drm fw_name); for (i = 0; i < adev->sdma.num_instances; i++) { release_firmware(adev->sdma.instance[i].fw); -diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c source/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c ---- source.bak/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c 2017-05-31 17:32:41.358386778 -0400 -@@ -51,7 +51,7 @@ - - out: - if (err) { -- DRM_ERROR("Failed to load firmware \"%s\"", fw_name); -+ DRM_ERROR("Failed to load firmware\n", fw_name); - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; - } -diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c source/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c ---- source.bak/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 2017-05-31 17:32:41.358386778 -0400 +diff -ru source/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c source/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +--- source/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 2017-08-21 10:54:54.489544208 -0400 @@ -973,7 +973,7 @@ out: if (err) { @@ -171,10 +165,10 @@ diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c source/drivers/gpu/drm fw_name); release_firmware(adev->gfx.pfp_fw); adev->gfx.pfp_fw = NULL; -diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c source/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c ---- source.bak/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 2017-05-31 17:29:09.000000000 -0400 -+++ source/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 2017-05-31 17:32:41.358386778 -0400 -@@ -1069,7 +1069,7 @@ +diff -ru source/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c source/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +--- source/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 2017-08-21 10:54:54.489544208 -0400 +@@ -1097,7 +1097,7 @@ out: if (err) { dev_err(adev->dev, @@ -183,9 +177,10 @@ diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c source/drivers/gpu/drm fw_name); release_firmware(adev->gfx.pfp_fw); adev->gfx.pfp_fw = NULL; -diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c source/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c ---- source.bak/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c 2016-10-02 
19:24:33.000000000 -0400 -+++ source/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c 2017-05-31 17:32:41.358386778 -0400 +Only in source/drivers/gpu/drm/amd/amdgpu: gfx_v8_0.c.orig +diff -ru source/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c source/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +--- source/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c 2017-08-21 10:54:54.489544208 -0400 @@ -162,7 +162,7 @@ out: if (err) { @@ -195,10 +190,10 @@ diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c source/drivers/gpu/drm fw_name); release_firmware(adev->mc.fw); adev->mc.fw = NULL; -diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c source/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c ---- source.bak/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c 2017-05-31 17:32:41.358386778 -0400 -@@ -240,7 +240,7 @@ +diff -ru source/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c source/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +--- source/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c 2017-08-21 10:54:54.489544208 -0400 +@@ -246,7 +246,7 @@ out: if (err) { printk(KERN_ERR @@ -207,21 +202,10 @@ diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c source/drivers/gpu/drm fw_name); release_firmware(adev->mc.fw); adev->mc.fw = NULL; -diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c source/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c ---- source.bak/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c 2017-05-31 17:32:41.358386778 -0400 -@@ -51,7 +51,7 @@ - - out: - if (err) { -- DRM_ERROR("Failed to load firmware \"%s\"", fw_name); -+ DRM_ERROR("Failed to load firmware\n", fw_name); - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; - } -diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c source/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c ---- source.bak/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 2017-05-31 17:32:41.362386778 -0400 +Only in source/drivers/gpu/drm/amd/amdgpu: gmc_v8_0.c.orig +diff -ru source/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c source/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +--- source/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 2017-08-21 10:54:54.489544208 -0400 @@ -171,7 +171,7 @@ out: if (err) { @@ -231,10 +215,10 @@ diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c source/drivers/gpu/dr fw_name); for (i = 0; i < adev->sdma.num_instances; i++) { release_firmware(adev->sdma.instance[i].fw); -diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c source/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c ---- source.bak/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 2017-05-31 17:32:41.362386778 -0400 -@@ -316,7 +316,7 @@ +diff -ru source/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c source/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +--- source/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 2017-08-21 10:54:54.489544208 -0400 +@@ -322,7 +322,7 @@ out: if (err) { printk(KERN_ERR @@ -243,21 +227,10 @@ diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c source/drivers/gpu/dr fw_name); for (i = 
0; i < adev->sdma.num_instances; i++) { release_firmware(adev->sdma.instance[i].fw); -diff -ru source.bak/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c source/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c ---- source.bak/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c 2017-05-31 17:32:41.362386778 -0400 -@@ -50,7 +50,7 @@ - - out: - if (err) { -- DRM_ERROR("Failed to load firmware \"%s\"", fw_name); -+ DRM_ERROR("Failed to load firmware\n", fw_name); - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; - } -diff -ru source.bak/drivers/gpu/drm/drm_edid_load.c source/drivers/gpu/drm/drm_edid_load.c ---- source.bak/drivers/gpu/drm/drm_edid_load.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/gpu/drm/drm_edid_load.c 2017-05-31 17:32:41.362386778 -0400 +Only in source/drivers/gpu/drm/amd/amdgpu: sdma_v3_0.c.orig +diff -ru source/drivers/gpu/drm/drm_edid_load.c source/drivers/gpu/drm/drm_edid_load.c +--- source/drivers/gpu/drm/drm_edid_load.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/gpu/drm/drm_edid_load.c 2017-08-21 10:54:54.489544208 -0400 @@ -188,8 +188,8 @@ err = request_firmware(&fw, name, &pdev->dev); platform_device_unregister(pdev); @@ -269,9 +242,9 @@ diff -ru source.bak/drivers/gpu/drm/drm_edid_load.c source/drivers/gpu/drm/drm_e return ERR_PTR(err); } -diff -ru source.bak/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c source/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c ---- source.bak/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c 2017-05-31 17:32:41.362386778 -0400 +diff -ru source/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c source/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c +--- source/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c 2017-08-21 10:54:54.489544208 -0400 @@ -109,7 +109,7 @@ ret = request_firmware(&fw, name, device->dev); @@ -281,9 +254,9 @@ diff -ru source.bak/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c source/drivers/ return ret; } -diff -ru source.bak/drivers/gpu/drm/r128/r128_cce.c source/drivers/gpu/drm/r128/r128_cce.c ---- source.bak/drivers/gpu/drm/r128/r128_cce.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/gpu/drm/r128/r128_cce.c 2017-05-31 17:32:41.362386778 -0400 +diff -ru source/drivers/gpu/drm/r128/r128_cce.c source/drivers/gpu/drm/r128/r128_cce.c +--- source/drivers/gpu/drm/r128/r128_cce.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/gpu/drm/r128/r128_cce.c 2017-08-21 10:54:54.489544208 -0400 @@ -155,14 +155,14 @@ rc = request_firmware(&fw, FIRMWARE_NAME, &pdev->dev); platform_device_unregister(pdev); @@ -301,10 +274,10 @@ diff -ru source.bak/drivers/gpu/drm/r128/r128_cce.c source/drivers/gpu/drm/r128/ fw->size, FIRMWARE_NAME); rc = -EINVAL; goto out_release; -diff -ru source.bak/drivers/gpu/drm/radeon/cik.c source/drivers/gpu/drm/radeon/cik.c ---- source.bak/drivers/gpu/drm/radeon/cik.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/gpu/drm/radeon/cik.c 2017-05-31 17:32:41.362386778 -0400 -@@ -2078,7 +2078,7 @@ +diff -ru source/drivers/gpu/drm/radeon/cik.c source/drivers/gpu/drm/radeon/cik.c +--- source/drivers/gpu/drm/radeon/cik.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/gpu/drm/radeon/cik.c 2017-08-21 10:54:54.489544208 -0400 +@@ -2070,7 +2070,7 @@ goto out; if (rdev->pfp_fw->size != pfp_req_size) { printk(KERN_ERR @@ -313,7 +286,7 @@ diff 
-ru source.bak/drivers/gpu/drm/radeon/cik.c source/drivers/gpu/drm/radeon/c rdev->pfp_fw->size, fw_name); err = -EINVAL; goto out; -@@ -2087,7 +2087,7 @@ +@@ -2079,7 +2079,7 @@ err = radeon_ucode_validate(rdev->pfp_fw); if (err) { printk(KERN_ERR @@ -322,7 +295,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/cik.c source/drivers/gpu/drm/radeon/c fw_name); goto out; } else { -@@ -2104,7 +2104,7 @@ +@@ -2096,7 +2096,7 @@ goto out; if (rdev->me_fw->size != me_req_size) { printk(KERN_ERR @@ -331,7 +304,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/cik.c source/drivers/gpu/drm/radeon/c rdev->me_fw->size, fw_name); err = -EINVAL; } -@@ -2112,7 +2112,7 @@ +@@ -2104,7 +2104,7 @@ err = radeon_ucode_validate(rdev->me_fw); if (err) { printk(KERN_ERR @@ -340,7 +313,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/cik.c source/drivers/gpu/drm/radeon/c fw_name); goto out; } else { -@@ -2129,7 +2129,7 @@ +@@ -2121,7 +2121,7 @@ goto out; if (rdev->ce_fw->size != ce_req_size) { printk(KERN_ERR @@ -349,7 +322,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/cik.c source/drivers/gpu/drm/radeon/c rdev->ce_fw->size, fw_name); err = -EINVAL; } -@@ -2137,7 +2137,7 @@ +@@ -2129,7 +2129,7 @@ err = radeon_ucode_validate(rdev->ce_fw); if (err) { printk(KERN_ERR @@ -358,7 +331,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/cik.c source/drivers/gpu/drm/radeon/c fw_name); goto out; } else { -@@ -2154,7 +2154,7 @@ +@@ -2146,7 +2146,7 @@ goto out; if (rdev->mec_fw->size != mec_req_size) { printk(KERN_ERR @@ -367,7 +340,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/cik.c source/drivers/gpu/drm/radeon/c rdev->mec_fw->size, fw_name); err = -EINVAL; } -@@ -2162,7 +2162,7 @@ +@@ -2154,7 +2154,7 @@ err = radeon_ucode_validate(rdev->mec_fw); if (err) { printk(KERN_ERR @@ -376,7 +349,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/cik.c source/drivers/gpu/drm/radeon/c fw_name); goto out; } else { -@@ -2194,7 +2194,7 @@ +@@ -2186,7 +2186,7 @@ goto out; if (rdev->rlc_fw->size != rlc_req_size) { printk(KERN_ERR @@ -385,7 +358,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/cik.c source/drivers/gpu/drm/radeon/c rdev->rlc_fw->size, fw_name); err = -EINVAL; } -@@ -2202,7 +2202,7 @@ +@@ -2194,7 +2194,7 @@ err = radeon_ucode_validate(rdev->rlc_fw); if (err) { printk(KERN_ERR @@ -394,7 +367,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/cik.c source/drivers/gpu/drm/radeon/c fw_name); goto out; } else { -@@ -2219,7 +2219,7 @@ +@@ -2211,7 +2211,7 @@ goto out; if (rdev->sdma_fw->size != sdma_req_size) { printk(KERN_ERR @@ -403,7 +376,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/cik.c source/drivers/gpu/drm/radeon/c rdev->sdma_fw->size, fw_name); err = -EINVAL; } -@@ -2227,7 +2227,7 @@ +@@ -2219,7 +2219,7 @@ err = radeon_ucode_validate(rdev->sdma_fw); if (err) { printk(KERN_ERR @@ -412,7 +385,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/cik.c source/drivers/gpu/drm/radeon/c fw_name); goto out; } else { -@@ -2251,7 +2251,7 @@ +@@ -2243,7 +2243,7 @@ if ((rdev->mc_fw->size != mc_req_size) && (rdev->mc_fw->size != mc2_req_size)){ printk(KERN_ERR @@ -421,7 +394,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/cik.c source/drivers/gpu/drm/radeon/c rdev->mc_fw->size, fw_name); err = -EINVAL; } -@@ -2260,7 +2260,7 @@ +@@ -2252,7 +2252,7 @@ err = radeon_ucode_validate(rdev->mc_fw); if (err) { printk(KERN_ERR @@ -430,7 +403,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/cik.c source/drivers/gpu/drm/radeon/c fw_name); goto out; } else { -@@ -2278,14 +2278,14 @@ +@@ -2270,14 +2270,14 @@ err = request_firmware(&rdev->smc_fw, fw_name, 
rdev->dev); if (err) { printk(KERN_ERR @@ -447,7 +420,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/cik.c source/drivers/gpu/drm/radeon/c rdev->smc_fw->size, fw_name); err = -EINVAL; } -@@ -2293,7 +2293,7 @@ +@@ -2285,7 +2285,7 @@ err = radeon_ucode_validate(rdev->smc_fw); if (err) { printk(KERN_ERR @@ -456,7 +429,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/cik.c source/drivers/gpu/drm/radeon/c fw_name); goto out; } else { -@@ -2315,7 +2315,7 @@ +@@ -2307,7 +2307,7 @@ if (err) { if (err != -EINVAL) printk(KERN_ERR @@ -465,9 +438,10 @@ diff -ru source.bak/drivers/gpu/drm/radeon/cik.c source/drivers/gpu/drm/radeon/c fw_name); release_firmware(rdev->pfp_fw); rdev->pfp_fw = NULL; -diff -ru source.bak/drivers/gpu/drm/radeon/ni.c source/drivers/gpu/drm/radeon/ni.c ---- source.bak/drivers/gpu/drm/radeon/ni.c 2017-05-31 17:29:09.000000000 -0400 -+++ source/drivers/gpu/drm/radeon/ni.c 2017-05-31 17:32:41.362386778 -0400 +Only in source/drivers/gpu/drm/radeon: cik.c.orig +diff -ru source/drivers/gpu/drm/radeon/ni.c source/drivers/gpu/drm/radeon/ni.c +--- source/drivers/gpu/drm/radeon/ni.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/gpu/drm/radeon/ni.c 2017-08-21 10:54:54.489544208 -0400 @@ -775,7 +775,7 @@ goto out; if (rdev->pfp_fw->size != pfp_req_size) { @@ -530,9 +504,9 @@ diff -ru source.bak/drivers/gpu/drm/radeon/ni.c source/drivers/gpu/drm/radeon/ni fw_name); release_firmware(rdev->pfp_fw); rdev->pfp_fw = NULL; -diff -ru source.bak/drivers/gpu/drm/radeon/r100.c source/drivers/gpu/drm/radeon/r100.c ---- source.bak/drivers/gpu/drm/radeon/r100.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/gpu/drm/radeon/r100.c 2017-05-31 17:32:41.362386778 -0400 +diff -ru source/drivers/gpu/drm/radeon/r100.c source/drivers/gpu/drm/radeon/r100.c +--- source/drivers/gpu/drm/radeon/r100.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/gpu/drm/radeon/r100.c 2017-08-21 10:54:54.493544208 -0400 @@ -1042,11 +1042,11 @@ err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); @@ -547,9 +521,9 @@ diff -ru source.bak/drivers/gpu/drm/radeon/r100.c source/drivers/gpu/drm/radeon/ rdev->me_fw->size, fw_name); err = -EINVAL; release_firmware(rdev->me_fw); -diff -ru source.bak/drivers/gpu/drm/radeon/r600.c source/drivers/gpu/drm/radeon/r600.c ---- source.bak/drivers/gpu/drm/radeon/r600.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/gpu/drm/radeon/r600.c 2017-05-31 17:32:41.362386778 -0400 +diff -ru source/drivers/gpu/drm/radeon/r600.c source/drivers/gpu/drm/radeon/r600.c +--- source/drivers/gpu/drm/radeon/r600.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/gpu/drm/radeon/r600.c 2017-08-21 10:54:54.493544208 -0400 @@ -2551,7 +2551,7 @@ goto out; if (rdev->pfp_fw->size != pfp_req_size) { @@ -603,9 +577,9 @@ diff -ru source.bak/drivers/gpu/drm/radeon/r600.c source/drivers/gpu/drm/radeon/ fw_name); release_firmware(rdev->pfp_fw); rdev->pfp_fw = NULL; -diff -ru source.bak/drivers/gpu/drm/radeon/radeon_uvd.c source/drivers/gpu/drm/radeon/radeon_uvd.c ---- source.bak/drivers/gpu/drm/radeon/radeon_uvd.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/gpu/drm/radeon/radeon_uvd.c 2017-05-31 17:32:41.366386778 -0400 +diff -ru source/drivers/gpu/drm/radeon/radeon_uvd.c source/drivers/gpu/drm/radeon/radeon_uvd.c +--- source/drivers/gpu/drm/radeon/radeon_uvd.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/gpu/drm/radeon/radeon_uvd.c 2017-08-21 10:54:54.493544208 -0400 @@ -140,7 +140,7 @@ /* Let's try to load the newer firmware first */ r = 
request_firmware(&rdev->uvd_fw, fw_name, rdev->dev); @@ -624,9 +598,9 @@ diff -ru source.bak/drivers/gpu/drm/radeon/radeon_uvd.c source/drivers/gpu/drm/r legacy_fw_name); return r; } -diff -ru source.bak/drivers/gpu/drm/radeon/radeon_vce.c source/drivers/gpu/drm/radeon/radeon_vce.c ---- source.bak/drivers/gpu/drm/radeon/radeon_vce.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/gpu/drm/radeon/radeon_vce.c 2017-05-31 17:32:41.366386778 -0400 +diff -ru source/drivers/gpu/drm/radeon/radeon_vce.c source/drivers/gpu/drm/radeon/radeon_vce.c +--- source/drivers/gpu/drm/radeon/radeon_vce.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/gpu/drm/radeon/radeon_vce.c 2017-08-21 10:54:54.493544208 -0400 @@ -87,7 +87,7 @@ r = request_firmware(&rdev->vce_fw, fw_name, rdev->dev); @@ -636,10 +610,10 @@ diff -ru source.bak/drivers/gpu/drm/radeon/radeon_vce.c source/drivers/gpu/drm/r fw_name); return r; } -diff -ru source.bak/drivers/gpu/drm/radeon/si.c source/drivers/gpu/drm/radeon/si.c ---- source.bak/drivers/gpu/drm/radeon/si.c 2017-05-31 17:29:09.000000000 -0400 -+++ source/drivers/gpu/drm/radeon/si.c 2017-05-31 17:32:41.366386778 -0400 -@@ -1765,7 +1765,7 @@ +diff -ru source/drivers/gpu/drm/radeon/si.c source/drivers/gpu/drm/radeon/si.c +--- source/drivers/gpu/drm/radeon/si.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/gpu/drm/radeon/si.c 2017-08-21 10:54:54.493544208 -0400 +@@ -1763,7 +1763,7 @@ goto out; if (rdev->pfp_fw->size != pfp_req_size) { printk(KERN_ERR @@ -648,7 +622,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/si.c source/drivers/gpu/drm/radeon/si rdev->pfp_fw->size, fw_name); err = -EINVAL; goto out; -@@ -1774,7 +1774,7 @@ +@@ -1772,7 +1772,7 @@ err = radeon_ucode_validate(rdev->pfp_fw); if (err) { printk(KERN_ERR @@ -657,7 +631,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/si.c source/drivers/gpu/drm/radeon/si fw_name); goto out; } else { -@@ -1791,7 +1791,7 @@ +@@ -1789,7 +1789,7 @@ goto out; if (rdev->me_fw->size != me_req_size) { printk(KERN_ERR @@ -666,7 +640,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/si.c source/drivers/gpu/drm/radeon/si rdev->me_fw->size, fw_name); err = -EINVAL; } -@@ -1799,7 +1799,7 @@ +@@ -1797,7 +1797,7 @@ err = radeon_ucode_validate(rdev->me_fw); if (err) { printk(KERN_ERR @@ -675,7 +649,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/si.c source/drivers/gpu/drm/radeon/si fw_name); goto out; } else { -@@ -1816,7 +1816,7 @@ +@@ -1814,7 +1814,7 @@ goto out; if (rdev->ce_fw->size != ce_req_size) { printk(KERN_ERR @@ -684,7 +658,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/si.c source/drivers/gpu/drm/radeon/si rdev->ce_fw->size, fw_name); err = -EINVAL; } -@@ -1824,7 +1824,7 @@ +@@ -1822,7 +1822,7 @@ err = radeon_ucode_validate(rdev->ce_fw); if (err) { printk(KERN_ERR @@ -693,7 +667,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/si.c source/drivers/gpu/drm/radeon/si fw_name); goto out; } else { -@@ -1841,7 +1841,7 @@ +@@ -1839,7 +1839,7 @@ goto out; if (rdev->rlc_fw->size != rlc_req_size) { printk(KERN_ERR @@ -702,7 +676,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/si.c source/drivers/gpu/drm/radeon/si rdev->rlc_fw->size, fw_name); err = -EINVAL; } -@@ -1849,7 +1849,7 @@ +@@ -1847,7 +1847,7 @@ err = radeon_ucode_validate(rdev->rlc_fw); if (err) { printk(KERN_ERR @@ -711,7 +685,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/si.c source/drivers/gpu/drm/radeon/si fw_name); goto out; } else { -@@ -1871,7 +1871,7 @@ +@@ -1872,7 +1872,7 @@ if ((rdev->mc_fw->size != mc_req_size) && (rdev->mc_fw->size != mc2_req_size)) { 
printk(KERN_ERR @@ -720,7 +694,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/si.c source/drivers/gpu/drm/radeon/si rdev->mc_fw->size, fw_name); err = -EINVAL; } -@@ -1880,7 +1880,7 @@ +@@ -1881,7 +1881,7 @@ err = radeon_ucode_validate(rdev->mc_fw); if (err) { printk(KERN_ERR @@ -729,7 +703,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/si.c source/drivers/gpu/drm/radeon/si fw_name); goto out; } else { -@@ -1898,14 +1898,14 @@ +@@ -1901,14 +1901,14 @@ err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); if (err) { printk(KERN_ERR @@ -746,7 +720,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/si.c source/drivers/gpu/drm/radeon/si rdev->smc_fw->size, fw_name); err = -EINVAL; } -@@ -1913,7 +1913,7 @@ +@@ -1916,7 +1916,7 @@ err = radeon_ucode_validate(rdev->smc_fw); if (err) { printk(KERN_ERR @@ -755,7 +729,7 @@ diff -ru source.bak/drivers/gpu/drm/radeon/si.c source/drivers/gpu/drm/radeon/si fw_name); goto out; } else { -@@ -1933,7 +1933,7 @@ +@@ -1936,7 +1936,7 @@ if (err) { if (err != -EINVAL) printk(KERN_ERR @@ -764,9 +738,10 @@ diff -ru source.bak/drivers/gpu/drm/radeon/si.c source/drivers/gpu/drm/radeon/si fw_name); release_firmware(rdev->pfp_fw); rdev->pfp_fw = NULL; -diff -ru source.bak/drivers/net/wireless/intel/ipw2x00/ipw2200.c source/drivers/net/wireless/intel/ipw2x00/ipw2200.c ---- source.bak/drivers/net/wireless/intel/ipw2x00/ipw2200.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/net/wireless/intel/ipw2x00/ipw2200.c 2017-05-31 17:32:41.366386778 -0400 +Only in source/drivers/gpu/drm/radeon: si.c.orig +diff -ru source/drivers/net/wireless/intel/ipw2x00/ipw2200.c source/drivers/net/wireless/intel/ipw2x00/ipw2200.c +--- source/drivers/net/wireless/intel/ipw2x00/ipw2200.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/net/wireless/intel/ipw2x00/ipw2200.c 2017-08-21 10:54:54.493544208 -0400 @@ -3419,12 +3419,12 @@ /* ask firmware_class module to get the boot firmware off disk */ rc = request_firmware(raw, name, &priv->pci_dev->dev); @@ -826,9 +801,9 @@ diff -ru source.bak/drivers/net/wireless/intel/ipw2x00/ipw2200.c source/drivers/ return rc; } -diff -ru source.bak/drivers/net/wireless/intel/iwlegacy/3945-mac.c source/drivers/net/wireless/intel/iwlegacy/3945-mac.c ---- source.bak/drivers/net/wireless/intel/iwlegacy/3945-mac.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/net/wireless/intel/iwlegacy/3945-mac.c 2017-05-31 17:32:41.366386778 -0400 +diff -ru source/drivers/net/wireless/intel/iwlegacy/3945-mac.c source/drivers/net/wireless/intel/iwlegacy/3945-mac.c +--- source/drivers/net/wireless/intel/iwlegacy/3945-mac.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/net/wireless/intel/iwlegacy/3945-mac.c 2017-08-21 10:54:54.493544208 -0400 @@ -1861,7 +1861,7 @@ sprintf(buf, "%s%u%s", name_pre, idx, ".ucode"); ret = request_firmware(&ucode_raw, buf, &il->pci_dev->dev); @@ -866,9 +841,9 @@ diff -ru source.bak/drivers/net/wireless/intel/iwlegacy/3945-mac.c source/driver api_ver); IL_INFO("loaded firmware version %u.%u.%u.%u\n", -diff -ru source.bak/drivers/net/wireless/intel/iwlegacy/4965-mac.c source/drivers/net/wireless/intel/iwlegacy/4965-mac.c ---- source.bak/drivers/net/wireless/intel/iwlegacy/4965-mac.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/net/wireless/intel/iwlegacy/4965-mac.c 2017-05-31 17:32:41.370386778 -0400 +diff -ru source/drivers/net/wireless/intel/iwlegacy/4965-mac.c source/drivers/net/wireless/intel/iwlegacy/4965-mac.c +--- source/drivers/net/wireless/intel/iwlegacy/4965-mac.c 2017-02-19 17:34:00.000000000 
-0500 ++++ source/drivers/net/wireless/intel/iwlegacy/4965-mac.c 2017-08-21 10:54:54.497544208 -0400 @@ -4706,7 +4706,7 @@ sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode"); @@ -906,9 +881,9 @@ diff -ru source.bak/drivers/net/wireless/intel/iwlegacy/4965-mac.c source/driver api_ver); IL_INFO("loaded firmware version %u.%u.%u.%u\n", -diff -ru source.bak/drivers/net/wireless/intel/iwlwifi/iwl-drv.c source/drivers/net/wireless/intel/iwlwifi/iwl-drv.c ---- source.bak/drivers/net/wireless/intel/iwlwifi/iwl-drv.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/net/wireless/intel/iwlwifi/iwl-drv.c 2017-05-31 17:32:41.370386778 -0400 +diff -ru source/drivers/net/wireless/intel/iwlwifi/iwl-drv.c source/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +--- source/drivers/net/wireless/intel/iwlwifi/iwl-drv.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/net/wireless/intel/iwlwifi/iwl-drv.c 2017-08-21 10:54:54.497544208 -0400 @@ -232,7 +232,7 @@ snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode", name_pre, tag); @@ -918,10 +893,10 @@ diff -ru source.bak/drivers/net/wireless/intel/iwlwifi/iwl-drv.c source/drivers/ drv->firmware_name); return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name, -diff -ru source.bak/drivers/net/wireless/intel/iwlwifi/mvm/fw.c source/drivers/net/wireless/intel/iwlwifi/mvm/fw.c ---- source.bak/drivers/net/wireless/intel/iwlwifi/mvm/fw.c 2017-05-31 17:29:09.000000000 -0400 -+++ source/drivers/net/wireless/intel/iwlwifi/mvm/fw.c 2017-05-31 17:32:41.370386778 -0400 -@@ -1279,7 +1279,7 @@ +diff -ru source/drivers/net/wireless/intel/iwlwifi/mvm/fw.c source/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +--- source/drivers/net/wireless/intel/iwlwifi/mvm/fw.c 2017-08-21 12:06:22.000000000 -0400 ++++ source/drivers/net/wireless/intel/iwlwifi/mvm/fw.c 2017-08-21 10:54:54.497544208 -0400 +@@ -1307,7 +1307,7 @@ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN); if (ret) { @@ -930,10 +905,11 @@ diff -ru source.bak/drivers/net/wireless/intel/iwlwifi/mvm/fw.c source/drivers/n goto error; } -diff -ru source.bak/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c source/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c ---- source.bak/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c 2017-05-31 17:32:41.370386778 -0400 -@@ -415,7 +415,7 @@ +Only in source/drivers/net/wireless/intel/iwlwifi/mvm: fw.c.orig +diff -ru source/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c source/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +--- source/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c 2017-08-21 10:54:54.497544208 -0400 +@@ -414,7 +414,7 @@ ret = request_firmware(&fw_entry, mvm->nvm_file_name, mvm->trans->dev); if (ret) { @@ -942,9 +918,10 @@ diff -ru source.bak/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c source/drivers/ mvm->nvm_file_name, ret); return ret; } -diff -ru source.bak/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c source/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c ---- source.bak/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 2017-05-31 17:29:09.000000000 -0400 -+++ source/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 2017-05-31 17:35:21.326386759 -0400 +Only in source/drivers/net/wireless/intel/iwlwifi/mvm: nvm.c.orig +diff -ru source/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 
source/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +--- source/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 2017-08-21 10:54:54.497544208 -0400 @@ -2074,9 +2074,9 @@ int ret = 0; u16 signature; @@ -957,147 +934,141 @@ diff -ru source.bak/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c source ret = -EAGAIN; goto exit; } -diff -ru source.bak/drivers/net/wireless/realtek/rtlwifi/core.c source/drivers/net/wireless/realtek/rtlwifi/core.c ---- source.bak/drivers/net/wireless/realtek/rtlwifi/core.c 2017-05-31 17:29:09.000000000 -0400 -+++ source/drivers/net/wireless/realtek/rtlwifi/core.c 2017-05-31 17:32:41.370386778 -0400 -@@ -106,12 +106,12 @@ +diff -ru source/drivers/net/wireless/realtek/rtlwifi/core.c source/drivers/net/wireless/realtek/rtlwifi/core.c +--- source/drivers/net/wireless/realtek/rtlwifi/core.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/net/wireless/realtek/rtlwifi/core.c 2017-08-21 11:09:25.201544104 -0400 +@@ -106,7 +106,7 @@ err = request_firmware(&firmware, rtlpriv->cfg->alt_fw_name, rtlpriv->io.dev); - pr_info("Loading alternative firmware %s\n", -+ pr_info("Loading alternative firmware\n", ++ pr_info("Loading alternative firmware \n", rtlpriv->cfg->alt_fw_name); if (!err) goto found_alt; - } -- pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name); -+ pr_err("Firmware not available\n", rtlpriv->cfg->fw_name); - rtlpriv->max_fw_size = 0; - return; - } -diff -ru source.bak/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c source/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c ---- source.bak/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c 2017-05-31 17:32:41.370386778 -0400 -@@ -171,7 +171,7 @@ +diff -ru source/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c source/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c +--- source/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c 2017-08-21 11:07:03.713544121 -0400 +@@ -172,7 +172,7 @@ - rtlpriv->cfg->fw_name = "rtlwifi/rtl8188efw.bin"; + fw_name = "rtlwifi/rtl8188efw.bin"; rtlpriv->max_fw_size = 0x8000; -- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); -+ pr_info("Requesting firmware\n", rtlpriv->cfg->fw_name); - err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, +- pr_info("Using firmware %s\n", fw_name); ++ pr_info("Using firmware \n", fw_name); + err = request_firmware_nowait(THIS_MODULE, 1, fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); -diff -ru source.bak/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c source/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c ---- source.bak/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c 2017-05-31 17:32:41.370386778 -0400 -@@ -174,7 +174,7 @@ - rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU_B.bin"; +diff -ru source/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c source/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c +--- source/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c 2017-08-21 11:07:03.737544121 -0400 +@@ -173,7 +173,7 @@ + fw_name = "rtlwifi/rtl8192cfw.bin"; 
rtlpriv->max_fw_size = 0x4000; -- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); -+ pr_info("Requesting firmware\n", rtlpriv->cfg->fw_name); - err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, +- pr_info("Using firmware %s\n", fw_name); ++ pr_info("Using firmware \n", fw_name); + err = request_firmware_nowait(THIS_MODULE, 1, fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); -diff -ru source.bak/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c source/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c ---- source.bak/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c 2017-05-31 17:32:41.370386778 -0400 -@@ -85,7 +85,7 @@ +diff -ru source/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c source/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c +--- source/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c 2017-08-21 11:07:03.737544121 -0400 +@@ -82,7 +82,7 @@ } /* provide name of alternative file */ rtlpriv->cfg->alt_fw_name = "rtlwifi/rtl8192cufw.bin"; -- pr_info("Loading firmware %s\n", rtlpriv->cfg->fw_name); -+ pr_info("Requesting firmware\n", rtlpriv->cfg->fw_name); +- pr_info("Loading firmware %s\n", fw_name); ++ pr_info("Loading firmware \n", fw_name); rtlpriv->max_fw_size = 0x4000; err = request_firmware_nowait(THIS_MODULE, 1, - rtlpriv->cfg->fw_name, rtlpriv->io.dev, -diff -ru source.bak/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c source/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c ---- source.bak/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c 2017-05-31 17:32:41.370386778 -0400 -@@ -181,7 +181,7 @@ + fw_name, rtlpriv->io.dev, +diff -ru source/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c source/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c +--- source/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c 2017-08-21 11:09:58.965544100 -0400 +@@ -178,7 +178,7 @@ rtlpriv->max_fw_size = 0x8000; pr_info("Driver for Realtek RTL8192DE WLAN interface\n"); -- pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name); -+ pr_info("Loading firmware file\n", rtlpriv->cfg->fw_name); +- pr_info("Loading firmware file %s\n", fw_name); ++ pr_info("Loading firmware file\n", fw_name); /* request fw */ - err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, -diff -ru source.bak/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c source/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c ---- source.bak/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c 2017-05-31 17:34:08.242386768 -0400 -@@ -173,7 +173,7 @@ - rtlpriv->cfg->fw_name = "rtlwifi/rtl8192eefw.bin"; + err = request_firmware_nowait(THIS_MODULE, 1, fw_name, +diff -ru source/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c source/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c +--- source/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c 2017-08-21 11:07:03.741544121 -0400 +@@ -174,7 +174,7 @@ + fw_name = "rtlwifi/rtl8192eefw.bin"; rtlpriv->max_fw_size = 
0x8000; -- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); -+ pr_info("Loading firmware\n", rtlpriv->cfg->fw_name); - err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, +- pr_info("Using firmware %s\n", fw_name); ++ pr_info("Using firmware \n", fw_name); + err = request_firmware_nowait(THIS_MODULE, 1, fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); -diff -ru source.bak/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c source/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c ---- source.bak/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c 2017-05-31 17:32:41.370386778 -0400 -@@ -94,7 +94,7 @@ +diff -ru source/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c source/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c +--- source/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c 2017-08-21 11:08:21.829544111 -0400 +@@ -91,7 +91,7 @@ "Firmware callback routine entered!\n"); complete(&rtlpriv->firmware_loading_complete); if (!firmware) { -- pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name); -+ pr_err("Firmware not available\n", rtlpriv->cfg->fw_name); +- pr_err("Firmware %s not available\n", fw_name); ++ pr_err("Firmware not available\n", fw_name); rtlpriv->max_fw_size = 0; return; } -@@ -214,7 +214,7 @@ +@@ -212,7 +212,7 @@ rtlpriv->max_fw_size = RTL8190_MAX_FIRMWARE_CODE_SIZE*2 + sizeof(struct fw_hdr); pr_info("Driver for Realtek RTL8192SE/RTL8191SE\n" -- "Loading firmware %s\n", rtlpriv->cfg->fw_name); -+ "Loading firmware\n", rtlpriv->cfg->fw_name); +- "Loading firmware %s\n", fw_name); ++ "Loading firmware \n", fw_name); /* request fw */ - err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, + err = request_firmware_nowait(THIS_MODULE, 1, fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, -diff -ru source.bak/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c source/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c ---- source.bak/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c 2017-05-31 17:32:41.370386778 -0400 -@@ -182,7 +182,7 @@ - rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw_B.bin"; +diff -ru source/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c source/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c +--- source/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c 2017-08-21 11:07:03.741544121 -0400 +@@ -181,7 +181,7 @@ + fw_name = "rtlwifi/rtl8723fw_B.bin"; rtlpriv->max_fw_size = 0x6000; -- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); -+ pr_info("Requesting firmware\n", rtlpriv->cfg->fw_name); - err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, +- pr_info("Using firmware %s\n", fw_name); ++ pr_info("Using firmware \n", fw_name); + err = request_firmware_nowait(THIS_MODULE, 1, fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); -diff -ru source.bak/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c source/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c ---- source.bak/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c 2017-05-31 17:33:46.026386770 -0400 -@@ -184,7 +184,7 @@ +diff -ru 
source/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c source/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c +--- source/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c 2017-08-21 11:07:03.741544121 -0400 +@@ -185,7 +185,7 @@ } rtlpriv->max_fw_size = 0x8000; -- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); -+ pr_info("Loading firmware\n", rtlpriv->cfg->fw_name); - err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, +- pr_info("Using firmware %s\n", fw_name); ++ pr_info("Using firmware \n", fw_name); + err = request_firmware_nowait(THIS_MODULE, 1, fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); -diff -ru source.bak/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c source/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c ---- source.bak/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c 2016-10-02 19:24:33.000000000 -0400 -+++ source/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c 2017-05-31 17:34:40.466386764 -0400 -@@ -212,7 +212,7 @@ +diff -ru source/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c source/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +--- source/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c 2017-02-19 17:34:00.000000000 -0500 ++++ source/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c 2017-08-21 11:07:03.741544121 -0400 +@@ -213,7 +213,7 @@ rtlpriv->max_fw_size = 0x8000; /*load normal firmware*/ -- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); -+ pr_info("Loading firmware\n", rtlpriv->cfg->fw_name); - err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, +- pr_info("Using firmware %s\n", fw_name); ++ pr_info("Using firmware \n", fw_name); + err = request_firmware_nowait(THIS_MODULE, 1, fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); -@@ -222,7 +222,7 @@ +@@ -223,7 +223,7 @@ return 1; } /*load wowlan firmware*/ -- pr_info("Using firmware %s\n", rtlpriv->cfg->wowlan_fw_name); -+ pr_info("Loading firmware\n", rtlpriv->cfg->wowlan_fw_name); +- pr_info("Using firmware %s\n", wowlan_fw_name); ++ pr_info("Using firmware \n", wowlan_fw_name); err = request_firmware_nowait(THIS_MODULE, 1, - rtlpriv->cfg->wowlan_fw_name, + wowlan_fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, diff --git a/helpers/make-linux-hwe b/helpers/make-linux-hwe index cee81d9e..942480e8 100644 --- a/helpers/make-linux-hwe +++ b/helpers/make-linux-hwe @@ -34,7 +34,7 @@ for FILE in $PRESERVE; do cp $FILE /tmp/preserve --parents -a done -sh $DATA/deblob-4.8 --force +sh $DATA/deblob-4.10 --force sed 's/bnx2.*fw/$(DEBLOBBED)/' -i firmware/Makefile cp /tmp/preserve/* . -a @@ -48,7 +48,7 @@ rm zfs spl debian/scripts/misc/update-zfs.sh -rf # Remove VBox rm ubuntu/vbox* -rf -sed /vbox/d -i debian.master/info/RECONCILE debian.master/control.d/generic.inclusion-list ubuntu/Makefile +sed /vbox/d -i debian.master/control.d/generic.inclusion-list ubuntu/Makefile sed '/vbox/d' -i debian.master/reconstruct # Compile with less modules and avoid abi check -- GitLab
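Note on the deblob-check directives used in the hunks above: "blobname" marks a regex as the name of a non-free firmware file, "blobna" marks other non-acceptable references, and "accept"/"defsnc" whitelist false positives. A minimal sketch of what a "blobname" match does to a driver, using the ziirave watchdog entry as the example; the surrounding code is hypothetical, and "/*(DEBLOBBED)*/" is the replacement string conventionally substituted by GNU Linux-libre:

	/* Before deblobbing: the driver asks for a non-free blob by name. */
	err = request_firmware(&fw, "ziirave_wdt.fw", dev);

	/* After deblobbing: the name is scrubbed, so the request fails
	 * cleanly and the user is not pointed at non-free firmware. */
	err = request_firmware(&fw, "/*(DEBLOBBED)*/", dev);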
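The "defsnc" entries exist because long runs of numeric constants look blob-like to the scanner even when they are ordinary, free driver data. A hypothetical table in the shape of the reg_default arrays accepted above (the values are made up for illustration):

	#include <linux/regmap.h>

	static const struct reg_default example_reg_defaults[] = {
		{ 0x02, 0x8000 },	/* register address, power-on default */
		{ 0x04, 0x0f0f },
		{ 0x06, 0x0000 },
	};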
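silent-accept-firmware.patch applies one transformation throughout: log messages that would print the name of a missing (typically non-free) firmware file lose the name, and some are demoted from errors to debug output. The pattern, as a sketch rather than a hunk from the patch (note that the now-unused format argument is left in place, which is harmless at run time but is the kind of thing gcc's -Wformat-extra-args warns about):

	r = request_firmware(&fw, fw_name, dev);
	if (r) {
		/* was: dev_err(dev, "Failed to load firmware \"%s\"\n", fw_name); */
		dev_err(dev, "Failed to load firmware\n", fw_name);
		return r;
	}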
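The rtlwifi hunks all adjust the same asynchronous load site. For reference, the shape of that call as used in the drivers above (simplified; rtl_fw_cb is the callback named in the hunks):

	/* Returns immediately; rtl_fw_cb() runs later with the loaded
	 * firmware, or with NULL if the file was not found.  The second
	 * argument (1) allows a uevent so userspace can supply the file. */
	err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
				      rtlpriv->io.dev, GFP_KERNEL, hw,
				      rtl_fw_cb);
	if (err)
		pr_info("Failed to request firmware!\n");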