Mirror of https://github.com/torvalds/linux.git (synced 2025-12-07 20:06:24 +00:00)
Compare commits
82 Commits
| Author | SHA1 | Date |
|---|---|---|
| | f74c2bb987 | |
| | 983f700eab | |
| | def8b72f0e | |
| | bfafddd8de | |
| | 950b07c14e | |
| | b3a9964cfa | |
| | a8e0abae2f | |
| | d3464ccd10 | |
| | 1e3778cb22 | |
| | 7641033e17 | |
| | 9772152b4b | |
| | 0c043d70d0 | |
| | 36daa831b5 | |
| | 30d7030b2f | |
| | 76f5e9f870 | |
| | 0445971000 | |
| | 08d433d812 | |
| | 9d098a6234 | |
| | 13da6ac106 | |
| | 1f493162b5 | |
| | 8928e917ae | |
| | 754265bcab | |
| | 36b7200f67 | |
| | 1e19ec6c3c | |
| | 7610bb0bde | |
| | d41a3effbb | |
| | a3f5e1f578 | |
| | 19e4147a04 | |
| | 262f7eeddc | |
| | 13133f933a | |
| | 13212a648f | |
| | 08b0c89160 | |
| | f56f791f6d | |
| | 02e740aeca | |
| | b0841eefd9 | |
| | 3b47fd5ca9 | |
| | 1251201c0d | |
| | d9a2b63ca9 | |
| | a8318c13e7 | |
| | 8205d5d98e | |
| | 02fa5d7b17 | |
| | 3d2c7d3704 | |
| | 896fc242bc | |
| | 718be6bab2 | |
| | 264b563b86 | |
| | 93d2c4de8d | |
| | 2a36c16efa | |
| | cf24aac386 | |
| | 689379c2f3 | |
| | 8ad8e02c2f | |
| | 8744daf4b0 | |
| | b0dfce90e3 | |
| | 5e2d2cc258 | |
| | 60083f9e94 | |
| | 47320fbe11 | |
| | f19e4ed1e1 | |
| | ff4dd08197 | |
| | 4030b4c585 | |
| | eb3d8f4223 | |
| | d33cd42d86 | |
| | 9b8bd476e7 | |
| | 29d9a0b507 | |
| | 55f7e5c364 | |
| | ae688e1720 | |
| | 52d083472e | |
| | 0ce4a85f4f | |
| | 89f2c0425c | |
| | 89781d0806 | |
| | 424c38a4e3 | |
| | 3764137906 | |
| | 728a257f65 | |
| | 325d0ab3a1 | |
| | 0622800d2e | |
| | 333f31436d | |
| | 807f6c8472 | |
| | 274b924088 | |
| | 438b6c20e6 | |
| | bc624a06f0 | |
| | 962411b05a | |
| | 2c231c0c1d | |
| | b9ee5e04fd | |
| | 45f5d5a9e3 | |
@@ -107,10 +107,13 @@ ForEachMacros:
- 'css_for_each_descendant_post'
- 'css_for_each_descendant_pre'
- 'device_for_each_child_node'
- 'dma_fence_chain_for_each'
- 'drm_atomic_crtc_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane_state'
- 'drm_atomic_for_each_plane_damage'
- 'drm_client_for_each_connector_iter'
- 'drm_client_for_each_modeset'
- 'drm_connector_for_each_possible_encoder'
- 'drm_for_each_connector_iter'
- 'drm_for_each_crtc'
@@ -126,6 +129,7 @@ ForEachMacros:
- 'drm_mm_for_each_node_in_range'
- 'drm_mm_for_each_node_safe'
- 'flow_action_for_each'
- 'for_each_active_dev_scope'
- 'for_each_active_drhd_unit'
- 'for_each_active_iommu'
- 'for_each_available_child_of_node'
@@ -153,6 +157,8 @@ ForEachMacros:
- 'for_each_cpu_not'
- 'for_each_cpu_wrap'
- 'for_each_dev_addr'
- 'for_each_dev_scope'
- 'for_each_displayid_db'
- 'for_each_dma_cap_mask'
- 'for_each_dpcm_be'
- 'for_each_dpcm_be_rollback'
@@ -169,6 +175,8 @@ ForEachMacros:
- 'for_each_evictable_lru'
- 'for_each_fib6_node_rt_rcu'
- 'for_each_fib6_walker_rt'
- 'for_each_free_mem_pfn_range_in_zone'
- 'for_each_free_mem_pfn_range_in_zone_from'
- 'for_each_free_mem_range'
- 'for_each_free_mem_range_reverse'
- 'for_each_func_rsrc'
@@ -178,6 +186,7 @@ ForEachMacros:
- 'for_each_ip_tunnel_rcu'
- 'for_each_irq_nr'
- 'for_each_link_codecs'
- 'for_each_link_platforms'
- 'for_each_lru'
- 'for_each_matching_node'
- 'for_each_matching_node_and_match'
@@ -302,7 +311,10 @@ ForEachMacros:
- 'ide_port_for_each_present_dev'
- 'idr_for_each_entry'
- 'idr_for_each_entry_continue'
- 'idr_for_each_entry_continue_ul'
- 'idr_for_each_entry_ul'
- 'in_dev_for_each_ifa_rcu'
- 'in_dev_for_each_ifa_rtnl'
- 'inet_bind_bucket_for_each'
- 'inet_lhash2_for_each_icsk_rcu'
- 'key_for_each'
@@ -343,8 +355,6 @@ ForEachMacros:
- 'media_device_for_each_intf'
- 'media_device_for_each_link'
- 'media_device_for_each_pad'
- 'mp_bvec_for_each_page'
- 'mp_bvec_for_each_segment'
- 'nanddev_io_for_each_page'
- 'netdev_for_each_lower_dev'
- 'netdev_for_each_lower_private'
@@ -381,18 +391,19 @@ ForEachMacros:
- 'radix_tree_for_each_slot'
- 'radix_tree_for_each_tagged'
- 'rbtree_postorder_for_each_entry_safe'
- 'rdma_for_each_block'
- 'rdma_for_each_port'
- 'resource_list_for_each_entry'
- 'resource_list_for_each_entry_safe'
- 'rhl_for_each_entry_rcu'
- 'rhl_for_each_rcu'
- 'rht_for_each'
- 'rht_for_each_from'
- 'rht_for_each_entry'
- 'rht_for_each_entry_from'
- 'rht_for_each_entry_rcu'
- 'rht_for_each_entry_rcu_from'
- 'rht_for_each_entry_safe'
- 'rht_for_each_from'
- 'rht_for_each_rcu'
- 'rht_for_each_rcu_from'
- '__rq_for_each_bio'
@@ -5,7 +5,7 @@ Scope
-----

Hardware issues which result in security problems are a different category
of security bugs than pure software bugs which only affect the Linux
of security bugs than pure software bugs which only affect the Linux
kernel.

Hardware issues like Meltdown, Spectre, L1TF etc. must be treated
@@ -159,7 +159,7 @@ Mitigation development

The initial response team sets up an encrypted mailing-list or repurposes
an existing one if appropriate. The disclosing party should provide a list
of contacts for all other parties who have already been, or should be
of contacts for all other parties who have already been, or should be,
informed about the issue. The response team contacts these parties so they
can name experts who should be subscribed to the mailing-list.

@@ -217,11 +217,11 @@ an involved disclosed party. The current ambassadors list:

AMD
IBM
Intel
Qualcomm
Qualcomm Trilok Soni <tsoni@codeaurora.org>

Microsoft
Microsoft Sasha Levin <sashal@kernel.org>
VMware
XEN
Xen Andrew Cooper <andrew.cooper3@citrix.com>

Canonical Tyler Hicks <tyhicks@canonical.com>
Debian Ben Hutchings <ben@decadent.org.uk>
@@ -230,8 +230,8 @@ an involved disclosed party. The current ambassadors list:
SUSE Jiri Kosina <jkosina@suse.cz>

Amazon
Google
============== ========================================================
Google Kees Cook <keescook@chromium.org>
============= ========================================================

If you want your organization to be added to the ambassadors list, please
contact the hardware security team. The nominated ambassador has to
Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 3
SUBLEVEL = 0
EXTRAVERSION = -rc7
EXTRAVERSION = -rc8
NAME = Bobtail Squid

# *DOCUMENTATION*
@@ -279,6 +279,7 @@
mmc-hs200-1_8v;
non-removable;
fixed-emmc-driver-type = <1>;
status = "okay";
};

&usb_extal_clk {
@@ -97,7 +97,7 @@
reg = <0x0 0x48000000 0x0 0x18000000>;
};

reg_1p8v: regulator0 {
reg_1p8v: regulator-1p8v {
compatible = "regulator-fixed";
regulator-name = "fixed-1.8V";
regulator-min-microvolt = <1800000>;
@@ -106,7 +106,7 @@
regulator-always-on;
};

reg_3p3v: regulator1 {
reg_3p3v: regulator-3p3v {
compatible = "regulator-fixed";
regulator-name = "fixed-3.3V";
regulator-min-microvolt = <3300000>;
@@ -115,7 +115,7 @@
regulator-always-on;
};

reg_12p0v: regulator1 {
reg_12p0v: regulator-12p0v {
compatible = "regulator-fixed";
regulator-name = "D12.0V";
regulator-min-microvolt = <12000000>;
@@ -101,21 +101,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
}
}

static bool tm_active_with_fp(struct task_struct *tsk)
{
return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
(tsk->thread.ckpt_regs.msr & MSR_FP);
}

static bool tm_active_with_altivec(struct task_struct *tsk)
{
return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
(tsk->thread.ckpt_regs.msr & MSR_VEC);
}
#else
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

bool strict_msr_control;
@@ -252,7 +239,7 @@ EXPORT_SYMBOL(enable_kernel_fp);

static int restore_fp(struct task_struct *tsk)
{
if (tsk->thread.load_fp || tm_active_with_fp(tsk)) {
if (tsk->thread.load_fp) {
load_fp_state(&current->thread.fp_state);
current->thread.load_fp++;
return 1;
@@ -334,8 +321,7 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);

static int restore_altivec(struct task_struct *tsk)
{
if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
(tsk->thread.load_vec || tm_active_with_altivec(tsk))) {
if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
load_vr_state(&tsk->thread.vr_state);
tsk->thread.used_vr = 1;
tsk->thread.load_vec++;
@@ -497,13 +483,14 @@ void giveup_all(struct task_struct *tsk)
if (!tsk->thread.regs)
return;

check_if_tm_restore_required(tsk);

usermsr = tsk->thread.regs->msr;

if ((usermsr & msr_all_available) == 0)
return;

msr_check_and_set(msr_all_available);
check_if_tm_restore_required(tsk);

WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
@@ -630,7 +630,6 @@ static void early_init_this_mmu(void)
#ifdef CONFIG_PPC_FSL_BOOK3E
if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
unsigned int num_cams;
int __maybe_unused cpu = smp_processor_id();
bool map = true;

/* use a quarter of the TLBCAM for bolted linear map */
@@ -37,12 +37,14 @@ static inline int fill_gva_list(u64 gva_list[], int offset,
* Lower 12 bits encode the number of additional
* pages to flush (in addition to the 'cur' page).
*/
if (diff >= HV_TLB_FLUSH_UNIT)
if (diff >= HV_TLB_FLUSH_UNIT) {
gva_list[gva_n] |= ~PAGE_MASK;
else if (diff)
cur += HV_TLB_FLUSH_UNIT;
} else if (diff) {
gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
cur = end;
}

cur += HV_TLB_FLUSH_UNIT;
gva_n++;

} while (cur < end);
@@ -70,6 +70,7 @@ static void sanitize_boot_params(struct boot_params *boot_params)
BOOT_PARAM_PRESERVE(eddbuf_entries),
BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
BOOT_PARAM_PRESERVE(secure_boot),
BOOT_PARAM_PRESERVE(hdr),
BOOT_PARAM_PRESERVE(e820_table),
BOOT_PARAM_PRESERVE(eddbuf),
@@ -444,8 +444,10 @@ __pu_label: \
({ \
int __gu_err; \
__inttype(*(ptr)) __gu_val; \
__typeof__(ptr) __gu_ptr = (ptr); \
__typeof__(size) __gu_size = (size); \
__uaccess_begin_nospec(); \
__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
__get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT); \
__uaccess_end(); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
__builtin_expect(__gu_err, 0); \
@@ -1179,10 +1179,6 @@ void clear_local_APIC(void)
apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
v = apic_read(APIC_LVT1);
apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
if (!x2apic_enabled()) {
v = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
apic_write(APIC_LDR, v);
}
if (maxlvt >= 4) {
v = apic_read(APIC_LVTPC);
apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
@@ -192,6 +192,7 @@ struct rcar_dmac_chan {
* @iomem: remapped I/O memory base
* @n_channels: number of available channels
* @channels: array of DMAC channels
* @channels_mask: bitfield of which DMA channels are managed by this driver
* @modules: bitmask of client modules in use
*/
struct rcar_dmac {
@@ -202,6 +203,7 @@ struct rcar_dmac {

unsigned int n_channels;
struct rcar_dmac_chan *channels;
unsigned int channels_mask;

DECLARE_BITMAP(modules, 256);
};
@@ -438,7 +440,7 @@ static int rcar_dmac_init(struct rcar_dmac *dmac)
u16 dmaor;

/* Clear all channels and enable the DMAC globally. */
rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0));
rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
rcar_dmac_write(dmac, RCAR_DMAOR,
RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);

@@ -814,6 +816,9 @@ static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac)
for (i = 0; i < dmac->n_channels; ++i) {
struct rcar_dmac_chan *chan = &dmac->channels[i];

if (!(dmac->channels_mask & BIT(i)))
continue;

/* Stop and reinitialize the channel. */
spin_lock_irq(&chan->lock);
rcar_dmac_chan_halt(chan);
@@ -1776,6 +1781,8 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
return 0;
}

#define RCAR_DMAC_MAX_CHANNELS 32

static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
{
struct device_node *np = dev->of_node;
@@ -1787,12 +1794,16 @@ static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
return ret;
}

if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
/* The hardware and driver don't support more than 32 bits in CHCLR */
if (dmac->n_channels <= 0 ||
dmac->n_channels >= RCAR_DMAC_MAX_CHANNELS) {
dev_err(dev, "invalid number of channels %u\n",
dmac->n_channels);
return -EINVAL;
}

dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0);

return 0;
}

@@ -1802,7 +1813,6 @@ static int rcar_dmac_probe(struct platform_device *pdev)
DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
unsigned int channels_offset = 0;
struct dma_device *engine;
struct rcar_dmac *dmac;
struct resource *mem;
@@ -1831,10 +1841,8 @@ static int rcar_dmac_probe(struct platform_device *pdev)
* level we can't disable it selectively, so ignore channel 0 for now if
* the device is part of an IOMMU group.
*/
if (device_iommu_mapped(&pdev->dev)) {
dmac->n_channels--;
channels_offset = 1;
}
if (device_iommu_mapped(&pdev->dev))
dmac->channels_mask &= ~BIT(0);

dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
sizeof(*dmac->channels), GFP_KERNEL);
@@ -1892,8 +1900,10 @@ static int rcar_dmac_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&engine->channels);

for (i = 0; i < dmac->n_channels; ++i) {
ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i],
i + channels_offset);
if (!(dmac->channels_mask & BIT(i)))
continue;

ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], i);
if (ret < 0)
goto error;
}
@@ -908,6 +908,7 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
struct dma_slave_config *slave_cfg = &schan->slave_cfg;
dma_addr_t src = 0, dst = 0;
dma_addr_t start_src = 0, start_dst = 0;
struct sprd_dma_desc *sdesc;
struct scatterlist *sg;
u32 len = 0;
@@ -954,6 +955,11 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
dst = sg_dma_address(sg);
}

if (!i) {
start_src = src;
start_dst = dst;
}

/*
* The link-list mode needs at least 2 link-list
* configurations. If there is only one sg, it doesn't
@@ -970,8 +976,8 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
}

ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len,
dir, flags, slave_cfg);
ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, start_src,
start_dst, len, dir, flags, slave_cfg);
if (ret) {
kfree(sdesc);
return NULL;
@@ -391,8 +391,10 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)

ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
nelm * 2);
if (ret)
if (ret) {
kfree(rsv_events);
return ret;
}

for (i = 0; i < nelm; i++) {
ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
@@ -1540,8 +1540,10 @@ static int omap_dma_probe(struct platform_device *pdev)

rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
IRQF_SHARED, "omap-dma-engine", od);
if (rc)
if (rc) {
omap_dma_free(od);
return rc;
}
}

if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
@@ -604,10 +604,9 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
u8 new_irqs;
int level, i;
u8 invert_irq_mask[MAX_BANK];
int reg_direction[MAX_BANK];
u8 reg_direction[MAX_BANK];

regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction,
NBANK(chip));
pca953x_read_regs(chip, chip->regs->direction, reg_direction);

if (chip->driver_data & PCA_PCAL) {
/* Enable latch on interrupt-enabled inputs */
@@ -679,7 +678,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
bool pending_seen = false;
bool trigger_seen = false;
u8 trigger[MAX_BANK];
int reg_direction[MAX_BANK];
u8 reg_direction[MAX_BANK];
int ret, i;

if (chip->driver_data & PCA_PCAL) {
@@ -710,8 +709,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
return false;

/* Remove output pins from the equation */
regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction,
NBANK(chip));
pca953x_read_regs(chip, chip->regs->direction, reg_direction);
for (i = 0; i < NBANK(chip); i++)
cur_stat[i] &= reg_direction[i];

@@ -768,7 +766,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
{
struct i2c_client *client = chip->client;
struct irq_chip *irq_chip = &chip->irq_chip;
int reg_direction[MAX_BANK];
u8 reg_direction[MAX_BANK];
int ret, i;

if (!client->irq)
@@ -789,8 +787,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
* interrupt. We have to rely on the previous read for
* this purpose.
*/
regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction,
NBANK(chip));
pca953x_read_regs(chip, chip->regs->direction, reg_direction);
for (i = 0; i < NBANK(chip); i++)
chip->irq_stat[i] &= reg_direction[i];
mutex_init(&chip->irq_lock);
@@ -1454,6 +1454,7 @@ static int drm_mode_parse_cmdline_refresh(const char *str, char **end_ptr,
|
||||
}
|
||||
|
||||
static int drm_mode_parse_cmdline_extra(const char *str, int length,
|
||||
bool freestanding,
|
||||
const struct drm_connector *connector,
|
||||
struct drm_cmdline_mode *mode)
|
||||
{
|
||||
@@ -1462,9 +1463,15 @@ static int drm_mode_parse_cmdline_extra(const char *str, int length,
|
||||
for (i = 0; i < length; i++) {
|
||||
switch (str[i]) {
|
||||
case 'i':
|
||||
if (freestanding)
|
||||
return -EINVAL;
|
||||
|
||||
mode->interlace = true;
|
||||
break;
|
||||
case 'm':
|
||||
if (freestanding)
|
||||
return -EINVAL;
|
||||
|
||||
mode->margins = true;
|
||||
break;
|
||||
case 'D':
|
||||
@@ -1542,6 +1549,7 @@ static int drm_mode_parse_cmdline_res_mode(const char *str, unsigned int length,
|
||||
if (extras) {
|
||||
int ret = drm_mode_parse_cmdline_extra(end_ptr + i,
|
||||
1,
|
||||
false,
|
||||
connector,
|
||||
mode);
|
||||
if (ret)
|
||||
@@ -1669,6 +1677,22 @@ static int drm_mode_parse_cmdline_options(char *str, size_t len,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const char *drm_named_modes_whitelist[] = {
|
||||
"NTSC",
|
||||
"PAL",
|
||||
};
|
||||
|
||||
static bool drm_named_mode_is_in_whitelist(const char *mode, unsigned int size)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(drm_named_modes_whitelist); i++)
|
||||
if (!strncmp(mode, drm_named_modes_whitelist[i], size))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_mode_parse_command_line_for_connector - parse command line modeline for connector
|
||||
* @mode_option: optional per connector mode option
|
||||
@@ -1725,16 +1749,30 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
|
||||
* bunch of things:
|
||||
* - We need to make sure that the first character (which
|
||||
* would be our resolution in X) is a digit.
|
||||
* - However, if the X resolution is missing, then we end up
|
||||
* with something like x<yres>, with our first character
|
||||
* being an alpha-numerical character, which would be
|
||||
* considered a named mode.
|
||||
* - If not, then it's either a named mode or a force on/off.
|
||||
* To distinguish between the two, we need to run the
|
||||
* extra parsing function, and if not, then we consider it
|
||||
* a named mode.
|
||||
*
|
||||
* If this isn't enough, we should add more heuristics here,
|
||||
* and matching unit-tests.
|
||||
*/
|
||||
if (!isdigit(name[0]) && name[0] != 'x')
|
||||
if (!isdigit(name[0]) && name[0] != 'x') {
|
||||
unsigned int namelen = strlen(name);
|
||||
|
||||
/*
|
||||
* Only the force on/off options can be in that case,
|
||||
* and they all take a single character.
|
||||
*/
|
||||
if (namelen == 1) {
|
||||
ret = drm_mode_parse_cmdline_extra(name, namelen, true,
|
||||
connector, mode);
|
||||
if (!ret)
|
||||
return true;
|
||||
}
|
||||
|
||||
named_mode = true;
|
||||
}
|
||||
|
||||
/* Try to locate the bpp and refresh specifiers, if any */
|
||||
bpp_ptr = strchr(name, '-');
|
||||
@@ -1772,6 +1810,10 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
|
||||
if (named_mode) {
|
||||
if (mode_end + 1 > DRM_DISPLAY_MODE_LEN)
|
||||
return false;
|
||||
|
||||
if (!drm_named_mode_is_in_whitelist(name, mode_end))
|
||||
return false;
|
||||
|
||||
strscpy(mode->name, name, mode_end + 1);
|
||||
} else {
|
||||
ret = drm_mode_parse_cmdline_res_mode(name, mode_end,
|
||||
@@ -1811,7 +1853,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
|
||||
extra_ptr != options_ptr) {
|
||||
int len = strlen(name) - (extra_ptr - name);
|
||||
|
||||
ret = drm_mode_parse_cmdline_extra(extra_ptr, len,
|
||||
ret = drm_mode_parse_cmdline_extra(extra_ptr, len, false,
|
||||
connector, mode);
|
||||
if (ret)
|
||||
return false;
|
||||
|
||||
@@ -656,10 +656,9 @@ static int ingenic_drm_probe(struct platform_device *pdev)
return ret;
}

if (panel) {
if (panel)
bridge = devm_drm_panel_bridge_add(dev, panel,
DRM_MODE_CONNECTOR_Unknown);
}
DRM_MODE_CONNECTOR_DPI);

priv->dma_hwdesc = dma_alloc_coherent(dev, sizeof(*priv->dma_hwdesc),
&priv->dma_hwdesc_phys,
@@ -190,6 +190,9 @@ MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin");
MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin");
@@ -210,6 +213,9 @@ MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin");
MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin");
@@ -230,6 +236,9 @@ MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin");
MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin");
@@ -250,3 +259,6 @@ MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin");
@@ -9,6 +9,13 @@

#define cmdline_test(test) selftest(test, test)

cmdline_test(drm_cmdline_test_force_d_only)
cmdline_test(drm_cmdline_test_force_D_only_dvi)
cmdline_test(drm_cmdline_test_force_D_only_hdmi)
cmdline_test(drm_cmdline_test_force_D_only_not_digital)
cmdline_test(drm_cmdline_test_force_e_only)
cmdline_test(drm_cmdline_test_margin_only)
cmdline_test(drm_cmdline_test_interlace_only)
cmdline_test(drm_cmdline_test_res)
cmdline_test(drm_cmdline_test_res_missing_x)
cmdline_test(drm_cmdline_test_res_missing_y)
@@ -17,6 +17,136 @@
|
||||
|
||||
static const struct drm_connector no_connector = {};
|
||||
|
||||
static int drm_cmdline_test_force_e_only(void *ignored)
|
||||
{
|
||||
struct drm_cmdline_mode mode = { };
|
||||
|
||||
FAIL_ON(!drm_mode_parse_command_line_for_connector("e",
|
||||
&no_connector,
|
||||
&mode));
|
||||
FAIL_ON(mode.specified);
|
||||
FAIL_ON(mode.refresh_specified);
|
||||
FAIL_ON(mode.bpp_specified);
|
||||
|
||||
FAIL_ON(mode.rb);
|
||||
FAIL_ON(mode.cvt);
|
||||
FAIL_ON(mode.interlace);
|
||||
FAIL_ON(mode.margins);
|
||||
FAIL_ON(mode.force != DRM_FORCE_ON);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int drm_cmdline_test_force_D_only_not_digital(void *ignored)
|
||||
{
|
||||
struct drm_cmdline_mode mode = { };
|
||||
|
||||
FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
|
||||
&no_connector,
|
||||
&mode));
|
||||
FAIL_ON(mode.specified);
|
||||
FAIL_ON(mode.refresh_specified);
|
||||
FAIL_ON(mode.bpp_specified);
|
||||
|
||||
FAIL_ON(mode.rb);
|
||||
FAIL_ON(mode.cvt);
|
||||
FAIL_ON(mode.interlace);
|
||||
FAIL_ON(mode.margins);
|
||||
FAIL_ON(mode.force != DRM_FORCE_ON);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct drm_connector connector_hdmi = {
|
||||
.connector_type = DRM_MODE_CONNECTOR_HDMIB,
|
||||
};
|
||||
|
||||
static int drm_cmdline_test_force_D_only_hdmi(void *ignored)
|
||||
{
|
||||
struct drm_cmdline_mode mode = { };
|
||||
|
||||
FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
|
||||
&connector_hdmi,
|
||||
&mode));
|
||||
FAIL_ON(mode.specified);
|
||||
FAIL_ON(mode.refresh_specified);
|
||||
FAIL_ON(mode.bpp_specified);
|
||||
|
||||
FAIL_ON(mode.rb);
|
||||
FAIL_ON(mode.cvt);
|
||||
FAIL_ON(mode.interlace);
|
||||
FAIL_ON(mode.margins);
|
||||
FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct drm_connector connector_dvi = {
|
||||
.connector_type = DRM_MODE_CONNECTOR_DVII,
|
||||
};
|
||||
|
||||
static int drm_cmdline_test_force_D_only_dvi(void *ignored)
|
||||
{
|
||||
struct drm_cmdline_mode mode = { };
|
||||
|
||||
FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
|
||||
&connector_dvi,
|
||||
&mode));
|
||||
FAIL_ON(mode.specified);
|
||||
FAIL_ON(mode.refresh_specified);
|
||||
FAIL_ON(mode.bpp_specified);
|
||||
|
||||
FAIL_ON(mode.rb);
|
||||
FAIL_ON(mode.cvt);
|
||||
FAIL_ON(mode.interlace);
|
||||
FAIL_ON(mode.margins);
|
||||
FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int drm_cmdline_test_force_d_only(void *ignored)
|
||||
{
|
||||
struct drm_cmdline_mode mode = { };
|
||||
|
||||
FAIL_ON(!drm_mode_parse_command_line_for_connector("d",
|
||||
&no_connector,
|
||||
&mode));
|
||||
FAIL_ON(mode.specified);
|
||||
FAIL_ON(mode.refresh_specified);
|
||||
FAIL_ON(mode.bpp_specified);
|
||||
|
||||
FAIL_ON(mode.rb);
|
||||
FAIL_ON(mode.cvt);
|
||||
FAIL_ON(mode.interlace);
|
||||
FAIL_ON(mode.margins);
|
||||
FAIL_ON(mode.force != DRM_FORCE_OFF);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int drm_cmdline_test_margin_only(void *ignored)
|
||||
{
|
||||
struct drm_cmdline_mode mode = { };
|
||||
|
||||
FAIL_ON(drm_mode_parse_command_line_for_connector("m",
|
||||
&no_connector,
|
||||
&mode));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int drm_cmdline_test_interlace_only(void *ignored)
|
||||
{
|
||||
struct drm_cmdline_mode mode = { };
|
||||
|
||||
FAIL_ON(drm_mode_parse_command_line_for_connector("i",
|
||||
&no_connector,
|
||||
&mode));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int drm_cmdline_test_res(void *ignored)
|
||||
{
|
||||
struct drm_cmdline_mode mode = { };
|
||||
|
||||
@@ -353,7 +353,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
!!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
kfree(reply);

reply = NULL;
if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
/* A checkpoint occurred. Retry. */
continue;
@@ -377,7 +377,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,

if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
kfree(reply);

reply = NULL;
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
/* A checkpoint occurred. Retry. */
continue;
@@ -389,10 +389,8 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
break;
}

if (retries == RETRIES) {
kfree(reply);
if (!reply)
return -EINVAL;
}

*msg_len = reply_len;
*msg = reply;
@@ -1143,6 +1143,17 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
|
||||
iommu_completion_wait(iommu);
|
||||
}
|
||||
|
||||
static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
|
||||
{
|
||||
struct iommu_cmd cmd;
|
||||
|
||||
build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
|
||||
dom_id, 1);
|
||||
iommu_queue_command(iommu, &cmd);
|
||||
|
||||
iommu_completion_wait(iommu);
|
||||
}
|
||||
|
||||
static void amd_iommu_flush_all(struct amd_iommu *iommu)
|
||||
{
|
||||
struct iommu_cmd cmd;
|
||||
@@ -1424,18 +1435,21 @@ static void free_pagetable(struct protection_domain *domain)
|
||||
* another level increases the size of the address space by 9 bits to a size up
|
||||
* to 64 bits.
|
||||
*/
|
||||
static bool increase_address_space(struct protection_domain *domain,
|
||||
static void increase_address_space(struct protection_domain *domain,
|
||||
gfp_t gfp)
|
||||
{
|
||||
unsigned long flags;
|
||||
u64 *pte;
|
||||
|
||||
if (domain->mode == PAGE_MODE_6_LEVEL)
|
||||
spin_lock_irqsave(&domain->lock, flags);
|
||||
|
||||
if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
|
||||
/* address space already 64 bit large */
|
||||
return false;
|
||||
goto out;
|
||||
|
||||
pte = (void *)get_zeroed_page(gfp);
|
||||
if (!pte)
|
||||
return false;
|
||||
goto out;
|
||||
|
||||
*pte = PM_LEVEL_PDE(domain->mode,
|
||||
iommu_virt_to_phys(domain->pt_root));
|
||||
@@ -1443,7 +1457,10 @@ static bool increase_address_space(struct protection_domain *domain,
|
||||
domain->mode += 1;
|
||||
domain->updated = true;
|
||||
|
||||
return true;
|
||||
out:
|
||||
spin_unlock_irqrestore(&domain->lock, flags);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
static u64 *alloc_pte(struct protection_domain *domain,
|
||||
@@ -1873,6 +1890,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
|
||||
{
|
||||
u64 pte_root = 0;
|
||||
u64 flags = 0;
|
||||
u32 old_domid;
|
||||
|
||||
if (domain->mode != PAGE_MODE_NONE)
|
||||
pte_root = iommu_virt_to_phys(domain->pt_root);
|
||||
@@ -1922,8 +1940,20 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
|
||||
flags &= ~DEV_DOMID_MASK;
|
||||
flags |= domain->id;
|
||||
|
||||
old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
|
||||
amd_iommu_dev_table[devid].data[1] = flags;
|
||||
amd_iommu_dev_table[devid].data[0] = pte_root;
|
||||
|
||||
/*
|
||||
* A kdump kernel might be replacing a domain ID that was copied from
|
||||
* the previous kernel--if so, it needs to flush the translation cache
|
||||
* entries for the old domain ID that is being overwritten
|
||||
*/
|
||||
if (old_domid) {
|
||||
struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
|
||||
|
||||
amd_iommu_flush_tlb_domid(iommu, old_domid);
|
||||
}
|
||||
}
|
||||
|
||||
static void clear_dte_entry(u16 devid)
|
||||
|
||||
@@ -339,6 +339,8 @@ static void domain_exit(struct dmar_domain *domain);
|
||||
static void domain_remove_dev_info(struct dmar_domain *domain);
|
||||
static void dmar_remove_one_dev_info(struct device *dev);
|
||||
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
|
||||
static void domain_context_clear(struct intel_iommu *iommu,
|
||||
struct device *dev);
|
||||
static int domain_detach_iommu(struct dmar_domain *domain,
|
||||
struct intel_iommu *iommu);
|
||||
static bool device_is_rmrr_locked(struct device *dev);
|
||||
@@ -2105,9 +2107,26 @@ out_unlock:
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct domain_context_mapping_data {
|
||||
struct dmar_domain *domain;
|
||||
struct intel_iommu *iommu;
|
||||
struct pasid_table *table;
|
||||
};
|
||||
|
||||
static int domain_context_mapping_cb(struct pci_dev *pdev,
|
||||
u16 alias, void *opaque)
|
||||
{
|
||||
struct domain_context_mapping_data *data = opaque;
|
||||
|
||||
return domain_context_mapping_one(data->domain, data->iommu,
|
||||
data->table, PCI_BUS_NUM(alias),
|
||||
alias & 0xff);
|
||||
}
|
||||
|
||||
static int
|
||||
domain_context_mapping(struct dmar_domain *domain, struct device *dev)
|
||||
{
|
||||
struct domain_context_mapping_data data;
|
||||
struct pasid_table *table;
|
||||
struct intel_iommu *iommu;
|
||||
u8 bus, devfn;
|
||||
@@ -2117,7 +2136,17 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
|
||||
return -ENODEV;
|
||||
|
||||
table = intel_pasid_get_table(dev);
|
||||
return domain_context_mapping_one(domain, iommu, table, bus, devfn);
|
||||
|
||||
if (!dev_is_pci(dev))
|
||||
return domain_context_mapping_one(domain, iommu, table,
|
||||
bus, devfn);
|
||||
|
||||
data.domain = domain;
|
||||
data.iommu = iommu;
|
||||
data.table = table;
|
||||
|
||||
return pci_for_each_dma_alias(to_pci_dev(dev),
|
||||
&domain_context_mapping_cb, &data);
|
||||
}
|
||||
|
||||
static int domain_context_mapped_cb(struct pci_dev *pdev,
|
||||
@@ -4759,6 +4788,28 @@ out_free_dmar:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
|
||||
{
|
||||
struct intel_iommu *iommu = opaque;
|
||||
|
||||
domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* NB - intel-iommu lacks any sort of reference counting for the users of
|
||||
* dependent devices. If multiple endpoints have intersecting dependent
|
||||
* devices, unbinding the driver from any one of them will possibly leave
|
||||
* the others unable to operate.
|
||||
*/
|
||||
static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
|
||||
{
|
||||
if (!iommu || !dev || !dev_is_pci(dev))
|
||||
return;
|
||||
|
||||
pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
|
||||
}
|
||||
|
||||
static void __dmar_remove_one_dev_info(struct device_domain_info *info)
|
||||
{
|
||||
struct dmar_domain *domain;
|
||||
@@ -4779,7 +4830,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
|
||||
PASID_RID2PASID);
|
||||
|
||||
iommu_disable_dev_iotlb(info);
|
||||
domain_context_clear_one(iommu, info->bus, info->devfn);
|
||||
domain_context_clear(iommu, info->dev);
|
||||
intel_pasid_free_table(info->dev);
|
||||
}
|
||||
|
||||
|
||||
@@ -100,24 +100,19 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
|
||||
}
|
||||
|
||||
static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
|
||||
unsigned long address, unsigned long pages, int ih, int gl)
|
||||
unsigned long address, unsigned long pages, int ih)
|
||||
{
|
||||
struct qi_desc desc;
|
||||
|
||||
if (pages == -1) {
|
||||
/* For global kernel pages we have to flush them in *all* PASIDs
|
||||
* because that's the only option the hardware gives us. Despite
|
||||
* the fact that they are actually only accessible through one. */
|
||||
if (gl)
|
||||
desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
|
||||
QI_EIOTLB_DID(sdev->did) |
|
||||
QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) |
|
||||
QI_EIOTLB_TYPE;
|
||||
else
|
||||
desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
|
||||
QI_EIOTLB_DID(sdev->did) |
|
||||
QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
|
||||
QI_EIOTLB_TYPE;
|
||||
/*
|
||||
* Do PASID granu IOTLB invalidation if page selective capability is
|
||||
* not available.
|
||||
*/
|
||||
if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) {
|
||||
desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
|
||||
QI_EIOTLB_DID(sdev->did) |
|
||||
QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
|
||||
QI_EIOTLB_TYPE;
|
||||
desc.qw1 = 0;
|
||||
} else {
|
||||
int mask = ilog2(__roundup_pow_of_two(pages));
|
||||
@@ -127,7 +122,6 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
|
||||
QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
|
||||
QI_EIOTLB_TYPE;
|
||||
desc.qw1 = QI_EIOTLB_ADDR(address) |
|
||||
QI_EIOTLB_GL(gl) |
|
||||
QI_EIOTLB_IH(ih) |
|
||||
QI_EIOTLB_AM(mask);
|
||||
}
|
||||
@@ -162,13 +156,13 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
|
||||
}
|
||||
|
||||
static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
|
||||
unsigned long pages, int ih, int gl)
|
||||
unsigned long pages, int ih)
|
||||
{
|
||||
struct intel_svm_dev *sdev;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(sdev, &svm->devs, list)
|
||||
intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl);
|
||||
intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
@@ -180,7 +174,7 @@ static void intel_invalidate_range(struct mmu_notifier *mn,
|
||||
struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
|
||||
|
||||
intel_flush_svm_range(svm, start,
|
||||
(end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0);
|
||||
(end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
|
||||
}
|
||||
|
||||
static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
|
||||
@@ -203,7 +197,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(sdev, &svm->devs, list) {
|
||||
intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid);
|
||||
intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
|
||||
intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
@@ -425,7 +419,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
|
||||
* large and has to be physically contiguous. So it's
|
||||
* hard to be as defensive as we might like. */
|
||||
intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
|
||||
intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
|
||||
intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
|
||||
kfree_rcu(sdev, rcu);
|
||||
|
||||
if (list_empty(&svm->devs)) {
|
||||
|
||||
@@ -564,7 +564,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
if (index == EXT_CSD_SANITIZE_START)
cmd.sanitize_busy = true;

err = mmc_wait_for_cmd(host, &cmd, 0);
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
if (err)
goto out;
@@ -1331,7 +1331,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
}
}

if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
if (!try_fill_recv(vi, rq, GFP_ATOMIC))
schedule_delayed_work(&vi->refill, 0);
}
@@ -655,6 +655,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
resource_size_t start, size;
struct nd_region *nd_region;
unsigned long npfns, align;
u32 end_trunc;
struct nd_pfn_sb *pfn_sb;
phys_addr_t offset;
const char *sig;
@@ -696,6 +697,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
size = resource_size(&nsio->res);
npfns = PHYS_PFN(size - SZ_8K);
align = max(nd_pfn->align, (1UL << SUBSECTION_SHIFT));
end_trunc = start + size - ALIGN_DOWN(start + size, align);
if (nd_pfn->mode == PFN_MODE_PMEM) {
/*
* The altmap should be padded out to the block size used
@@ -714,7 +716,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
return -ENXIO;
}

npfns = PHYS_PFN(size - offset);
npfns = PHYS_PFN(size - offset - end_trunc);
pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
pfn_sb->dataoff = cpu_to_le64(offset);
pfn_sb->npfns = cpu_to_le64(npfns);
@@ -723,6 +725,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
pfn_sb->version_major = cpu_to_le16(1);
pfn_sb->version_minor = cpu_to_le16(3);
pfn_sb->end_trunc = cpu_to_le32(end_trunc);
pfn_sb->align = cpu_to_le32(nd_pfn->align);
checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
pfn_sb->checksum = cpu_to_le64(checksum);
@@ -5715,7 +5715,7 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
* 0 = Set nr_hw_queues by the number of CPUs or HW queues.
* 1,128 = Manually specify the maximum nr_hw_queue value to be set,
*
* Value range is [0,128]. Default value is 8.
* Value range is [0,256]. Default value is 8.
*/
LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
@@ -46,7 +46,7 @@

/* FCP MQ queue count limiting */
#define LPFC_FCP_MQ_THRESHOLD_MIN 0
#define LPFC_FCP_MQ_THRESHOLD_MAX 128
#define LPFC_FCP_MQ_THRESHOLD_MAX 256
#define LPFC_FCP_MQ_THRESHOLD_DEF 8

/* Common buffer size to accomidate SCSI and NVME IO buffers */
@@ -630,6 +630,9 @@ int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len,
struct geni_wrapper *wrapper = se->wrapper;
u32 val;

if (!wrapper)
return -EINVAL;

*iova = dma_map_single(wrapper->dev, buf, len, DMA_TO_DEVICE);
if (dma_mapping_error(wrapper->dev, *iova))
return -EIO;
@@ -663,6 +666,9 @@ int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len,
struct geni_wrapper *wrapper = se->wrapper;
u32 val;

if (!wrapper)
return -EINVAL;

*iova = dma_map_single(wrapper->dev, buf, len, DMA_FROM_DEVICE);
if (dma_mapping_error(wrapper->dev, *iova))
return -EIO;
@@ -22,6 +22,12 @@
* Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000

/* Max number of packets transferred before requeueing the job.
* Using this limit prevents one virtqueue from starving others with
* pkts.
*/
#define VHOST_TEST_PKT_WEIGHT 256

enum {
VHOST_TEST_VQ = 0,
VHOST_TEST_VQ_MAX = 1,
@@ -80,10 +86,8 @@ static void handle_vq(struct vhost_test *n)
}
vhost_add_used_and_signal(&n->dev, vq, head, 0);
total_len += len;
if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
vhost_poll_queue(&vq->poll);
if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
break;
}
}

mutex_unlock(&vq->mutex);
@@ -115,7 +119,8 @@ static int vhost_test_open(struct inode *inode, struct file *f)
dev = &n->dev;
vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT);

f->private_data = n;
|
||||
|
||||
@@ -203,7 +203,6 @@ EXPORT_SYMBOL_GPL(vhost_poll_init);
|
||||
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
|
||||
{
|
||||
__poll_t mask;
|
||||
int ret = 0;
|
||||
|
||||
if (poll->wqh)
|
||||
return 0;
|
||||
@@ -213,10 +212,10 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
|
||||
vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
|
||||
if (mask & EPOLLERR) {
|
||||
vhost_poll_stop(poll);
|
||||
ret = -EINVAL;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(vhost_poll_start);
|
||||
|
||||
@@ -298,160 +297,6 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
|
||||
__vhost_vq_meta_reset(d->vqs[i]);
|
||||
}
|
||||
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
static void vhost_map_unprefetch(struct vhost_map *map)
|
||||
{
|
||||
kfree(map->pages);
|
||||
map->pages = NULL;
|
||||
map->npages = 0;
|
||||
map->addr = NULL;
|
||||
}
|
||||
|
||||
static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq)
|
||||
{
|
||||
struct vhost_map *map[VHOST_NUM_ADDRS];
|
||||
int i;
|
||||
|
||||
spin_lock(&vq->mmu_lock);
|
||||
for (i = 0; i < VHOST_NUM_ADDRS; i++) {
|
||||
map[i] = rcu_dereference_protected(vq->maps[i],
|
||||
lockdep_is_held(&vq->mmu_lock));
|
||||
if (map[i])
|
||||
rcu_assign_pointer(vq->maps[i], NULL);
|
||||
}
|
||||
spin_unlock(&vq->mmu_lock);
|
||||
|
||||
synchronize_rcu();
|
||||
|
||||
for (i = 0; i < VHOST_NUM_ADDRS; i++)
|
||||
if (map[i])
|
||||
vhost_map_unprefetch(map[i]);
|
||||
|
||||
}
|
||||
|
||||
static void vhost_reset_vq_maps(struct vhost_virtqueue *vq)
|
||||
{
|
||||
int i;
|
||||
|
||||
vhost_uninit_vq_maps(vq);
|
||||
for (i = 0; i < VHOST_NUM_ADDRS; i++)
|
||||
vq->uaddrs[i].size = 0;
|
||||
}
|
||||
|
||||
static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr,
|
||||
unsigned long start,
|
||||
unsigned long end)
|
||||
{
|
||||
if (unlikely(!uaddr->size))
|
||||
return false;
|
||||
|
||||
return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size);
|
||||
}
|
||||
|
||||
static void vhost_invalidate_vq_start(struct vhost_virtqueue *vq,
|
||||
int index,
|
||||
unsigned long start,
|
||||
unsigned long end)
|
||||
{
|
||||
struct vhost_uaddr *uaddr = &vq->uaddrs[index];
|
||||
struct vhost_map *map;
|
||||
int i;
|
||||
|
||||
if (!vhost_map_range_overlap(uaddr, start, end))
|
||||
return;
|
||||
|
||||
spin_lock(&vq->mmu_lock);
|
||||
++vq->invalidate_count;
|
||||
|
||||
map = rcu_dereference_protected(vq->maps[index],
|
||||
lockdep_is_held(&vq->mmu_lock));
|
||||
if (map) {
|
||||
if (uaddr->write) {
|
||||
for (i = 0; i < map->npages; i++)
|
||||
set_page_dirty(map->pages[i]);
|
||||
}
|
||||
rcu_assign_pointer(vq->maps[index], NULL);
|
||||
}
|
||||
spin_unlock(&vq->mmu_lock);
|
||||
|
||||
if (map) {
|
||||
synchronize_rcu();
|
||||
vhost_map_unprefetch(map);
|
||||
}
|
||||
}
|
||||
|
||||
static void vhost_invalidate_vq_end(struct vhost_virtqueue *vq,
|
||||
int index,
|
||||
unsigned long start,
|
||||
unsigned long end)
|
||||
{
|
||||
if (!vhost_map_range_overlap(&vq->uaddrs[index], start, end))
|
||||
return;
|
||||
|
||||
spin_lock(&vq->mmu_lock);
|
||||
--vq->invalidate_count;
|
||||
spin_unlock(&vq->mmu_lock);
|
||||
}
|
||||
|
||||
static int vhost_invalidate_range_start(struct mmu_notifier *mn,
|
||||
const struct mmu_notifier_range *range)
|
||||
{
|
||||
struct vhost_dev *dev = container_of(mn, struct vhost_dev,
|
||||
mmu_notifier);
|
||||
int i, j;
|
||||
|
||||
if (!mmu_notifier_range_blockable(range))
|
||||
return -EAGAIN;
|
||||
|
||||
for (i = 0; i < dev->nvqs; i++) {
|
||||
struct vhost_virtqueue *vq = dev->vqs[i];
|
||||
|
||||
for (j = 0; j < VHOST_NUM_ADDRS; j++)
|
||||
vhost_invalidate_vq_start(vq, j,
|
||||
range->start,
|
||||
range->end);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vhost_invalidate_range_end(struct mmu_notifier *mn,
|
||||
const struct mmu_notifier_range *range)
|
||||
{
|
||||
struct vhost_dev *dev = container_of(mn, struct vhost_dev,
|
||||
mmu_notifier);
|
||||
int i, j;
|
||||
|
||||
for (i = 0; i < dev->nvqs; i++) {
|
||||
struct vhost_virtqueue *vq = dev->vqs[i];
|
||||
|
||||
for (j = 0; j < VHOST_NUM_ADDRS; j++)
|
||||
vhost_invalidate_vq_end(vq, j,
|
||||
range->start,
|
||||
range->end);
|
||||
}
|
||||
}
|
||||
|
||||
static const struct mmu_notifier_ops vhost_mmu_notifier_ops = {
|
||||
.invalidate_range_start = vhost_invalidate_range_start,
|
||||
.invalidate_range_end = vhost_invalidate_range_end,
|
||||
};
|
||||
|
||||
static void vhost_init_maps(struct vhost_dev *dev)
|
||||
{
|
||||
struct vhost_virtqueue *vq;
|
||||
int i, j;
|
||||
|
||||
dev->mmu_notifier.ops = &vhost_mmu_notifier_ops;
|
||||
|
||||
for (i = 0; i < dev->nvqs; ++i) {
|
||||
vq = dev->vqs[i];
|
||||
for (j = 0; j < VHOST_NUM_ADDRS; j++)
|
||||
RCU_INIT_POINTER(vq->maps[j], NULL);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
static void vhost_vq_reset(struct vhost_dev *dev,
|
||||
struct vhost_virtqueue *vq)
|
||||
{
|
||||
@@ -480,11 +325,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
|
||||
vq->busyloop_timeout = 0;
|
||||
vq->umem = NULL;
|
||||
vq->iotlb = NULL;
|
||||
vq->invalidate_count = 0;
|
||||
__vhost_vq_meta_reset(vq);
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
vhost_reset_vq_maps(vq);
|
||||
#endif
|
||||
}
|
||||
|
||||
static int vhost_worker(void *data)
|
||||
@@ -634,9 +475,7 @@ void vhost_dev_init(struct vhost_dev *dev,
|
||||
INIT_LIST_HEAD(&dev->read_list);
|
||||
INIT_LIST_HEAD(&dev->pending_list);
|
||||
spin_lock_init(&dev->iotlb_lock);
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
vhost_init_maps(dev);
|
||||
#endif
|
||||
|
||||
|
||||
for (i = 0; i < dev->nvqs; ++i) {
|
||||
vq = dev->vqs[i];
|
||||
@@ -645,7 +484,6 @@ void vhost_dev_init(struct vhost_dev *dev,
|
||||
vq->heads = NULL;
|
||||
vq->dev = dev;
|
||||
mutex_init(&vq->mutex);
|
||||
spin_lock_init(&vq->mmu_lock);
|
||||
vhost_vq_reset(dev, vq);
|
||||
if (vq->handle_kick)
|
||||
vhost_poll_init(&vq->poll, vq->handle_kick,
|
||||
@@ -725,18 +563,7 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
|
||||
if (err)
|
||||
goto err_cgroup;
|
||||
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
err = mmu_notifier_register(&dev->mmu_notifier, dev->mm);
|
||||
if (err)
|
||||
goto err_mmu_notifier;
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
err_mmu_notifier:
|
||||
vhost_dev_free_iovecs(dev);
|
||||
#endif
|
||||
err_cgroup:
|
||||
kthread_stop(worker);
|
||||
dev->worker = NULL;
|
||||
@@ -827,107 +654,6 @@ static void vhost_clear_msg(struct vhost_dev *dev)
spin_unlock(&dev->iotlb_lock);
}

#if VHOST_ARCH_CAN_ACCEL_UACCESS
static void vhost_setup_uaddr(struct vhost_virtqueue *vq,
int index, unsigned long uaddr,
size_t size, bool write)
{
struct vhost_uaddr *addr = &vq->uaddrs[index];

addr->uaddr = uaddr;
addr->size = size;
addr->write = write;
}

static void vhost_setup_vq_uaddr(struct vhost_virtqueue *vq)
{
vhost_setup_uaddr(vq, VHOST_ADDR_DESC,
(unsigned long)vq->desc,
vhost_get_desc_size(vq, vq->num),
false);
vhost_setup_uaddr(vq, VHOST_ADDR_AVAIL,
(unsigned long)vq->avail,
vhost_get_avail_size(vq, vq->num),
false);
vhost_setup_uaddr(vq, VHOST_ADDR_USED,
(unsigned long)vq->used,
vhost_get_used_size(vq, vq->num),
true);
}

static int vhost_map_prefetch(struct vhost_virtqueue *vq,
int index)
{
struct vhost_map *map;
struct vhost_uaddr *uaddr = &vq->uaddrs[index];
struct page **pages;
int npages = DIV_ROUND_UP(uaddr->size, PAGE_SIZE);
int npinned;
void *vaddr, *v;
int err;
int i;

spin_lock(&vq->mmu_lock);

err = -EFAULT;
if (vq->invalidate_count)
goto err;

err = -ENOMEM;
map = kmalloc(sizeof(*map), GFP_ATOMIC);
if (!map)
goto err;

pages = kmalloc_array(npages, sizeof(struct page *), GFP_ATOMIC);
if (!pages)
goto err_pages;

err = EFAULT;
npinned = __get_user_pages_fast(uaddr->uaddr, npages,
uaddr->write, pages);
if (npinned > 0)
release_pages(pages, npinned);
if (npinned != npages)
goto err_gup;

for (i = 0; i < npinned; i++)
if (PageHighMem(pages[i]))
goto err_gup;

vaddr = v = page_address(pages[0]);

/* For simplicity, fallback to userspace address if VA is not
* contigious.
*/
for (i = 1; i < npinned; i++) {
v += PAGE_SIZE;
if (v != page_address(pages[i]))
goto err_gup;
}

map->addr = vaddr + (uaddr->uaddr & (PAGE_SIZE - 1));
map->npages = npages;
map->pages = pages;

rcu_assign_pointer(vq->maps[index], map);
/* No need for a synchronize_rcu(). This function should be
* called by dev->worker so we are serialized with all
* readers.
*/
spin_unlock(&vq->mmu_lock);

return 0;

err_gup:
kfree(pages);
err_pages:
kfree(map);
err:
spin_unlock(&vq->mmu_lock);
return err;
}
#endif

void vhost_dev_cleanup(struct vhost_dev *dev)
{
int i;
@@ -957,16 +683,8 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
kthread_stop(dev->worker);
dev->worker = NULL;
}
if (dev->mm) {
#if VHOST_ARCH_CAN_ACCEL_UACCESS
mmu_notifier_unregister(&dev->mmu_notifier, dev->mm);
#endif
if (dev->mm)
mmput(dev->mm);
}
#if VHOST_ARCH_CAN_ACCEL_UACCESS
for (i = 0; i < dev->nvqs; i++)
vhost_uninit_vq_maps(dev->vqs[i]);
#endif
dev->mm = NULL;
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
@@ -1195,26 +913,6 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,

static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
{
#if VHOST_ARCH_CAN_ACCEL_UACCESS
struct vhost_map *map;
struct vring_used *used;

if (!vq->iotlb) {
rcu_read_lock();

map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
if (likely(map)) {
used = map->addr;
*((__virtio16 *)&used->ring[vq->num]) =
cpu_to_vhost16(vq, vq->avail_idx);
rcu_read_unlock();
return 0;
}

rcu_read_unlock();
}
#endif

return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
vhost_avail_event(vq));
}
@@ -1223,27 +921,6 @@ static inline int vhost_put_used(struct vhost_virtqueue *vq,
struct vring_used_elem *head, int idx,
int count)
{
#if VHOST_ARCH_CAN_ACCEL_UACCESS
struct vhost_map *map;
struct vring_used *used;
size_t size;

if (!vq->iotlb) {
rcu_read_lock();

map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
if (likely(map)) {
used = map->addr;
size = count * sizeof(*head);
memcpy(used->ring + idx, head, size);
rcu_read_unlock();
return 0;
}

rcu_read_unlock();
}
#endif

return vhost_copy_to_user(vq, vq->used->ring + idx, head,
count * sizeof(*head));
}
@@ -1251,25 +928,6 @@ static inline int vhost_put_used(struct vhost_virtqueue *vq,
static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)

{
#if VHOST_ARCH_CAN_ACCEL_UACCESS
struct vhost_map *map;
struct vring_used *used;

if (!vq->iotlb) {
rcu_read_lock();

map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
if (likely(map)) {
used = map->addr;
used->flags = cpu_to_vhost16(vq, vq->used_flags);
rcu_read_unlock();
return 0;
}

rcu_read_unlock();
}
#endif

return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
&vq->used->flags);
}
@@ -1277,25 +935,6 @@ static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)

{
#if VHOST_ARCH_CAN_ACCEL_UACCESS
struct vhost_map *map;
struct vring_used *used;

if (!vq->iotlb) {
rcu_read_lock();

map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
if (likely(map)) {
used = map->addr;
used->idx = cpu_to_vhost16(vq, vq->last_used_idx);
rcu_read_unlock();
return 0;
}

rcu_read_unlock();
}
#endif

return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
&vq->used->idx);
}
@@ -1341,50 +980,12 @@ static void vhost_dev_unlock_vqs(struct vhost_dev *d)
static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
__virtio16 *idx)
{
#if VHOST_ARCH_CAN_ACCEL_UACCESS
struct vhost_map *map;
struct vring_avail *avail;

if (!vq->iotlb) {
rcu_read_lock();

map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
if (likely(map)) {
avail = map->addr;
*idx = avail->idx;
rcu_read_unlock();
return 0;
}

rcu_read_unlock();
}
#endif

return vhost_get_avail(vq, *idx, &vq->avail->idx);
}

static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
__virtio16 *head, int idx)
{
#if VHOST_ARCH_CAN_ACCEL_UACCESS
struct vhost_map *map;
struct vring_avail *avail;

if (!vq->iotlb) {
rcu_read_lock();

map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
if (likely(map)) {
avail = map->addr;
*head = avail->ring[idx & (vq->num - 1)];
rcu_read_unlock();
return 0;
}

rcu_read_unlock();
}
#endif

return vhost_get_avail(vq, *head,
&vq->avail->ring[idx & (vq->num - 1)]);
}
@@ -1392,98 +993,24 @@ static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
__virtio16 *flags)
{
#if VHOST_ARCH_CAN_ACCEL_UACCESS
struct vhost_map *map;
struct vring_avail *avail;

if (!vq->iotlb) {
rcu_read_lock();

map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
if (likely(map)) {
avail = map->addr;
*flags = avail->flags;
rcu_read_unlock();
return 0;
}

rcu_read_unlock();
}
#endif

return vhost_get_avail(vq, *flags, &vq->avail->flags);
}

static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
__virtio16 *event)
{
#if VHOST_ARCH_CAN_ACCEL_UACCESS
struct vhost_map *map;
struct vring_avail *avail;

if (!vq->iotlb) {
rcu_read_lock();
map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
if (likely(map)) {
avail = map->addr;
*event = (__virtio16)avail->ring[vq->num];
rcu_read_unlock();
return 0;
}
rcu_read_unlock();
}
#endif

return vhost_get_avail(vq, *event, vhost_used_event(vq));
}

static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
__virtio16 *idx)
{
#if VHOST_ARCH_CAN_ACCEL_UACCESS
struct vhost_map *map;
struct vring_used *used;

if (!vq->iotlb) {
rcu_read_lock();

map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
if (likely(map)) {
used = map->addr;
*idx = used->idx;
rcu_read_unlock();
return 0;
}

rcu_read_unlock();
}
#endif

return vhost_get_used(vq, *idx, &vq->used->idx);
}

static inline int vhost_get_desc(struct vhost_virtqueue *vq,
struct vring_desc *desc, int idx)
{
#if VHOST_ARCH_CAN_ACCEL_UACCESS
struct vhost_map *map;
struct vring_desc *d;

if (!vq->iotlb) {
rcu_read_lock();

map = rcu_dereference(vq->maps[VHOST_ADDR_DESC]);
if (likely(map)) {
d = map->addr;
*desc = *(d + idx);
rcu_read_unlock();
return 0;
}

rcu_read_unlock();
}
#endif

return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
}

@@ -1824,32 +1351,12 @@ static bool iotlb_access_ok(struct vhost_virtqueue *vq,
return true;
}

#if VHOST_ARCH_CAN_ACCEL_UACCESS
static void vhost_vq_map_prefetch(struct vhost_virtqueue *vq)
{
struct vhost_map __rcu *map;
int i;

for (i = 0; i < VHOST_NUM_ADDRS; i++) {
rcu_read_lock();
map = rcu_dereference(vq->maps[i]);
rcu_read_unlock();
if (unlikely(!map))
vhost_map_prefetch(vq, i);
}
}
#endif

int vq_meta_prefetch(struct vhost_virtqueue *vq)
{
unsigned int num = vq->num;

if (!vq->iotlb) {
#if VHOST_ARCH_CAN_ACCEL_UACCESS
vhost_vq_map_prefetch(vq);
#endif
if (!vq->iotlb)
return 1;
}

return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
@@ -2060,16 +1567,6 @@ static long vhost_vring_set_num_addr(struct vhost_dev *d,

mutex_lock(&vq->mutex);

#if VHOST_ARCH_CAN_ACCEL_UACCESS
/* Unregister MMU notifer to allow invalidation callback
* can access vq->uaddrs[] without holding a lock.
*/
if (d->mm)
mmu_notifier_unregister(&d->mmu_notifier, d->mm);

vhost_uninit_vq_maps(vq);
#endif

switch (ioctl) {
case VHOST_SET_VRING_NUM:
r = vhost_vring_set_num(d, vq, argp);
@@ -2081,13 +1578,6 @@ static long vhost_vring_set_num_addr(struct vhost_dev *d,
BUG();
}

#if VHOST_ARCH_CAN_ACCEL_UACCESS
vhost_setup_vq_uaddr(vq);

if (d->mm)
mmu_notifier_register(&d->mmu_notifier, d->mm);
#endif

mutex_unlock(&vq->mutex);

return r;

@@ -12,9 +12,6 @@
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
#include <linux/pagemap.h>
#include <linux/mmu_notifier.h>
#include <asm/cacheflush.h>

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);
@@ -83,24 +80,6 @@ enum vhost_uaddr_type {
VHOST_NUM_ADDRS = 3,
};

struct vhost_map {
int npages;
void *addr;
struct page **pages;
};

struct vhost_uaddr {
unsigned long uaddr;
size_t size;
bool write;
};

#if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0
#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
#else
#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
#endif

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
struct vhost_dev *dev;
@@ -111,22 +90,7 @@ struct vhost_virtqueue {
struct vring_desc __user *desc;
struct vring_avail __user *avail;
struct vring_used __user *used;

#if VHOST_ARCH_CAN_ACCEL_UACCESS
/* Read by memory accessors, modified by meta data
* prefetching, MMU notifier and vring ioctl().
* Synchonrized through mmu_lock (writers) and RCU (writers
* and readers).
*/
struct vhost_map __rcu *maps[VHOST_NUM_ADDRS];
/* Read by MMU notifier, modified by vring ioctl(),
* synchronized through MMU notifier
* registering/unregistering.
*/
struct vhost_uaddr uaddrs[VHOST_NUM_ADDRS];
#endif
const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS];

struct file *kick;
struct eventfd_ctx *call_ctx;
struct eventfd_ctx *error_ctx;
@@ -181,8 +145,6 @@ struct vhost_virtqueue {
bool user_be;
#endif
u32 busyloop_timeout;
spinlock_t mmu_lock;
int invalidate_count;
};

struct vhost_msg_node {
@@ -196,9 +158,6 @@ struct vhost_msg_node {

struct vhost_dev {
struct mm_struct *mm;
#ifdef CONFIG_MMU_NOTIFIER
struct mmu_notifier mmu_notifier;
#endif
struct mutex mutex;
struct vhost_virtqueue **vqs;
int nvqs;

@@ -20,6 +20,15 @@
#include <linux/list.h>
#include <linux/spinlock.h>

struct configfs_fragment {
atomic_t frag_count;
struct rw_semaphore frag_sem;
bool frag_dead;
};

void put_fragment(struct configfs_fragment *);
struct configfs_fragment *get_fragment(struct configfs_fragment *);

struct configfs_dirent {
atomic_t s_count;
int s_dependent_count;
@@ -34,6 +43,7 @@ struct configfs_dirent {
#ifdef CONFIG_LOCKDEP
int s_depth;
#endif
struct configfs_fragment *s_frag;
};

#define CONFIGFS_ROOT 0x0001
@@ -61,8 +71,8 @@ extern int configfs_create(struct dentry *, umode_t mode, void (*init)(struct in
extern int configfs_create_file(struct config_item *, const struct configfs_attribute *);
extern int configfs_create_bin_file(struct config_item *,
const struct configfs_bin_attribute *);
extern int configfs_make_dirent(struct configfs_dirent *,
struct dentry *, void *, umode_t, int);
extern int configfs_make_dirent(struct configfs_dirent *, struct dentry *,
void *, umode_t, int, struct configfs_fragment *);
extern int configfs_dirent_is_ready(struct configfs_dirent *);

extern void configfs_hash_and_remove(struct dentry * dir, const char * name);
@@ -137,6 +147,7 @@ static inline void release_configfs_dirent(struct configfs_dirent * sd)
{
if (!(sd->s_type & CONFIGFS_ROOT)) {
kfree(sd->s_iattr);
put_fragment(sd->s_frag);
kmem_cache_free(configfs_dir_cachep, sd);
}
}

@@ -151,11 +151,38 @@ configfs_adjust_dir_dirent_depth_after_populate(struct configfs_dirent *sd)

#endif /* CONFIG_LOCKDEP */

static struct configfs_fragment *new_fragment(void)
{
struct configfs_fragment *p;

p = kmalloc(sizeof(struct configfs_fragment), GFP_KERNEL);
if (p) {
atomic_set(&p->frag_count, 1);
init_rwsem(&p->frag_sem);
p->frag_dead = false;
}
return p;
}

void put_fragment(struct configfs_fragment *frag)
{
if (frag && atomic_dec_and_test(&frag->frag_count))
kfree(frag);
}

struct configfs_fragment *get_fragment(struct configfs_fragment *frag)
{
if (likely(frag))
atomic_inc(&frag->frag_count);
return frag;
}

/*
* Allocates a new configfs_dirent and links it to the parent configfs_dirent
*/
static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *parent_sd,
void *element, int type)
void *element, int type,
struct configfs_fragment *frag)
{
struct configfs_dirent * sd;

@@ -175,6 +202,7 @@ static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *paren
kmem_cache_free(configfs_dir_cachep, sd);
return ERR_PTR(-ENOENT);
}
sd->s_frag = get_fragment(frag);
list_add(&sd->s_sibling, &parent_sd->s_children);
spin_unlock(&configfs_dirent_lock);

@@ -209,11 +237,11 @@ static int configfs_dirent_exists(struct configfs_dirent *parent_sd,

int configfs_make_dirent(struct configfs_dirent * parent_sd,
struct dentry * dentry, void * element,
umode_t mode, int type)
umode_t mode, int type, struct configfs_fragment *frag)
{
struct configfs_dirent * sd;

sd = configfs_new_dirent(parent_sd, element, type);
sd = configfs_new_dirent(parent_sd, element, type, frag);
if (IS_ERR(sd))
return PTR_ERR(sd);

@@ -260,7 +288,8 @@ static void init_symlink(struct inode * inode)
* until it is validated by configfs_dir_set_ready()
*/

static int configfs_create_dir(struct config_item *item, struct dentry *dentry)
static int configfs_create_dir(struct config_item *item, struct dentry *dentry,
struct configfs_fragment *frag)
{
int error;
umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
@@ -273,7 +302,8 @@ static int configfs_create_dir(struct config_item *item, struct dentry *dentry)
return error;

error = configfs_make_dirent(p->d_fsdata, dentry, item, mode,
CONFIGFS_DIR | CONFIGFS_USET_CREATING);
CONFIGFS_DIR | CONFIGFS_USET_CREATING,
frag);
if (unlikely(error))
return error;

@@ -338,9 +368,10 @@ int configfs_create_link(struct configfs_symlink *sl,
{
int err = 0;
umode_t mode = S_IFLNK | S_IRWXUGO;
struct configfs_dirent *p = parent->d_fsdata;

err = configfs_make_dirent(parent->d_fsdata, dentry, sl, mode,
CONFIGFS_ITEM_LINK);
err = configfs_make_dirent(p, dentry, sl, mode,
CONFIGFS_ITEM_LINK, p->s_frag);
if (!err) {
err = configfs_create(dentry, mode, init_symlink);
if (err) {
@@ -599,7 +630,8 @@ static int populate_attrs(struct config_item *item)

static int configfs_attach_group(struct config_item *parent_item,
struct config_item *item,
struct dentry *dentry);
struct dentry *dentry,
struct configfs_fragment *frag);
static void configfs_detach_group(struct config_item *item);

static void detach_groups(struct config_group *group)
@@ -647,7 +679,8 @@ static void detach_groups(struct config_group *group)
* try using vfs_mkdir. Just a thought.
*/
static int create_default_group(struct config_group *parent_group,
struct config_group *group)
struct config_group *group,
struct configfs_fragment *frag)
{
int ret;
struct configfs_dirent *sd;
@@ -663,7 +696,7 @@ static int create_default_group(struct config_group *parent_group,
d_add(child, NULL);

ret = configfs_attach_group(&parent_group->cg_item,
&group->cg_item, child);
&group->cg_item, child, frag);
if (!ret) {
sd = child->d_fsdata;
sd->s_type |= CONFIGFS_USET_DEFAULT;
@@ -677,13 +710,14 @@ static int create_default_group(struct config_group *parent_group,
return ret;
}

static int populate_groups(struct config_group *group)
static int populate_groups(struct config_group *group,
struct configfs_fragment *frag)
{
struct config_group *new_group;
int ret = 0;

list_for_each_entry(new_group, &group->default_groups, group_entry) {
ret = create_default_group(group, new_group);
ret = create_default_group(group, new_group, frag);
if (ret) {
detach_groups(group);
break;
@@ -797,11 +831,12 @@ static void link_group(struct config_group *parent_group, struct config_group *g
*/
static int configfs_attach_item(struct config_item *parent_item,
struct config_item *item,
struct dentry *dentry)
struct dentry *dentry,
struct configfs_fragment *frag)
{
int ret;

ret = configfs_create_dir(item, dentry);
ret = configfs_create_dir(item, dentry, frag);
if (!ret) {
ret = populate_attrs(item);
if (ret) {
@@ -831,12 +866,13 @@ static void configfs_detach_item(struct config_item *item)

static int configfs_attach_group(struct config_item *parent_item,
struct config_item *item,
struct dentry *dentry)
struct dentry *dentry,
struct configfs_fragment *frag)
{
int ret;
struct configfs_dirent *sd;

ret = configfs_attach_item(parent_item, item, dentry);
ret = configfs_attach_item(parent_item, item, dentry, frag);
if (!ret) {
sd = dentry->d_fsdata;
sd->s_type |= CONFIGFS_USET_DIR;
@@ -852,7 +888,7 @@ static int configfs_attach_group(struct config_item *parent_item,
*/
inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
configfs_adjust_dir_dirent_depth_before_populate(sd);
ret = populate_groups(to_config_group(item));
ret = populate_groups(to_config_group(item), frag);
if (ret) {
configfs_detach_item(item);
d_inode(dentry)->i_flags |= S_DEAD;
@@ -1247,6 +1283,7 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
struct configfs_dirent *sd;
const struct config_item_type *type;
struct module *subsys_owner = NULL, *new_item_owner = NULL;
struct configfs_fragment *frag;
char *name;

sd = dentry->d_parent->d_fsdata;
@@ -1265,6 +1302,12 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
goto out;
}

frag = new_fragment();
if (!frag) {
ret = -ENOMEM;
goto out;
}

/* Get a working ref for the duration of this function */
parent_item = configfs_get_config_item(dentry->d_parent);
type = parent_item->ci_type;
@@ -1367,9 +1410,9 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
spin_unlock(&configfs_dirent_lock);

if (group)
ret = configfs_attach_group(parent_item, item, dentry);
ret = configfs_attach_group(parent_item, item, dentry, frag);
else
ret = configfs_attach_item(parent_item, item, dentry);
ret = configfs_attach_item(parent_item, item, dentry, frag);

spin_lock(&configfs_dirent_lock);
sd->s_type &= ~CONFIGFS_USET_IN_MKDIR;
@@ -1406,6 +1449,7 @@ out_put:
* reference.
*/
config_item_put(parent_item);
put_fragment(frag);

out:
return ret;
@@ -1417,6 +1461,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
struct config_item *item;
struct configfs_subsystem *subsys;
struct configfs_dirent *sd;
struct configfs_fragment *frag;
struct module *subsys_owner = NULL, *dead_item_owner = NULL;
int ret;

@@ -1474,6 +1519,16 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
}
} while (ret == -EAGAIN);

frag = sd->s_frag;
if (down_write_killable(&frag->frag_sem)) {
spin_lock(&configfs_dirent_lock);
configfs_detach_rollback(dentry);
spin_unlock(&configfs_dirent_lock);
return -EINTR;
}
frag->frag_dead = true;
up_write(&frag->frag_sem);

/* Get a working ref for the duration of this function */
item = configfs_get_config_item(dentry);

@@ -1574,7 +1629,7 @@ static int configfs_dir_open(struct inode *inode, struct file *file)
*/
err = -ENOENT;
if (configfs_dirent_is_ready(parent_sd)) {
file->private_data = configfs_new_dirent(parent_sd, NULL, 0);
file->private_data = configfs_new_dirent(parent_sd, NULL, 0, NULL);
if (IS_ERR(file->private_data))
err = PTR_ERR(file->private_data);
else
@@ -1732,8 +1787,13 @@ int configfs_register_group(struct config_group *parent_group,
{
struct configfs_subsystem *subsys = parent_group->cg_subsys;
struct dentry *parent;
struct configfs_fragment *frag;
int ret;

frag = new_fragment();
if (!frag)
return -ENOMEM;

mutex_lock(&subsys->su_mutex);
link_group(parent_group, group);
mutex_unlock(&subsys->su_mutex);
@@ -1741,7 +1801,7 @@ int configfs_register_group(struct config_group *parent_group,
parent = parent_group->cg_item.ci_dentry;

inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
ret = create_default_group(parent_group, group);
ret = create_default_group(parent_group, group, frag);
if (ret)
goto err_out;

@@ -1749,12 +1809,14 @@ int configfs_register_group(struct config_group *parent_group,
configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
spin_unlock(&configfs_dirent_lock);
inode_unlock(d_inode(parent));
put_fragment(frag);
return 0;
err_out:
inode_unlock(d_inode(parent));
mutex_lock(&subsys->su_mutex);
unlink_group(group);
mutex_unlock(&subsys->su_mutex);
put_fragment(frag);
return ret;
}
EXPORT_SYMBOL(configfs_register_group);
@@ -1770,16 +1832,12 @@ void configfs_unregister_group(struct config_group *group)
struct configfs_subsystem *subsys = group->cg_subsys;
struct dentry *dentry = group->cg_item.ci_dentry;
struct dentry *parent = group->cg_item.ci_parent->ci_dentry;
struct configfs_dirent *sd = dentry->d_fsdata;
struct configfs_fragment *frag = sd->s_frag;

mutex_lock(&subsys->su_mutex);
if (!group->cg_item.ci_parent->ci_group) {
/*
* The parent has already been unlinked and detached
* due to a rmdir.
*/
goto unlink_group;
}
mutex_unlock(&subsys->su_mutex);
down_write(&frag->frag_sem);
frag->frag_dead = true;
up_write(&frag->frag_sem);

inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
spin_lock(&configfs_dirent_lock);
@@ -1796,7 +1854,6 @@ void configfs_unregister_group(struct config_group *group)
dput(dentry);

mutex_lock(&subsys->su_mutex);
unlink_group:
unlink_group(group);
mutex_unlock(&subsys->su_mutex);
}
@@ -1853,10 +1910,17 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
struct dentry *dentry;
struct dentry *root;
struct configfs_dirent *sd;
struct configfs_fragment *frag;

frag = new_fragment();
if (!frag)
return -ENOMEM;

root = configfs_pin_fs();
if (IS_ERR(root))
if (IS_ERR(root)) {
put_fragment(frag);
return PTR_ERR(root);
}

if (!group->cg_item.ci_name)
group->cg_item.ci_name = group->cg_item.ci_namebuf;
@@ -1872,7 +1936,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
d_add(dentry, NULL);

err = configfs_attach_group(sd->s_element, &group->cg_item,
dentry);
dentry, frag);
if (err) {
BUG_ON(d_inode(dentry));
d_drop(dentry);
@@ -1890,6 +1954,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
unlink_group(group);
configfs_release_fs();
}
put_fragment(frag);

return err;
}
@@ -1899,12 +1964,18 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
struct config_group *group = &subsys->su_group;
struct dentry *dentry = group->cg_item.ci_dentry;
struct dentry *root = dentry->d_sb->s_root;
struct configfs_dirent *sd = dentry->d_fsdata;
struct configfs_fragment *frag = sd->s_frag;

if (dentry->d_parent != root) {
pr_err("Tried to unregister non-subsystem!\n");
return;
}

down_write(&frag->frag_sem);
frag->frag_dead = true;
up_write(&frag->frag_sem);

inode_lock_nested(d_inode(root),
I_MUTEX_PARENT);
inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);

@@ -39,40 +39,44 @@ struct configfs_buffer {
bool write_in_progress;
char *bin_buffer;
int bin_buffer_size;
int cb_max_size;
struct config_item *item;
struct module *owner;
union {
struct configfs_attribute *attr;
struct configfs_bin_attribute *bin_attr;
};
};


/**
* fill_read_buffer - allocate and fill buffer from item.
* @dentry: dentry pointer.
* @buffer: data buffer for file.
*
* Allocate @buffer->page, if it hasn't been already, then call the
* config_item's show() method to fill the buffer with this attribute's
* data.
* This is called only once, on the file's first read.
*/
static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buffer)
static inline struct configfs_fragment *to_frag(struct file *file)
{
struct configfs_attribute * attr = to_attr(dentry);
struct config_item * item = to_item(dentry->d_parent);
int ret = 0;
ssize_t count;
struct configfs_dirent *sd = file->f_path.dentry->d_fsdata;

return sd->s_frag;
}

static int fill_read_buffer(struct file *file, struct configfs_buffer *buffer)
{
struct configfs_fragment *frag = to_frag(file);
ssize_t count = -ENOENT;

if (!buffer->page)
buffer->page = (char *) get_zeroed_page(GFP_KERNEL);
if (!buffer->page)
return -ENOMEM;

count = attr->show(item, buffer->page);
down_read(&frag->frag_sem);
if (!frag->frag_dead)
count = buffer->attr->show(buffer->item, buffer->page);
up_read(&frag->frag_sem);

BUG_ON(count > (ssize_t)SIMPLE_ATTR_SIZE);
if (count >= 0) {
buffer->needs_read_fill = 0;
buffer->count = count;
} else
ret = count;
return ret;
if (count < 0)
return count;
if (WARN_ON_ONCE(count > (ssize_t)SIMPLE_ATTR_SIZE))
return -EIO;
buffer->needs_read_fill = 0;
buffer->count = count;
return 0;
}

/**
@@ -97,12 +101,13 @@ static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buf
static ssize_t
configfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct configfs_buffer * buffer = file->private_data;
struct configfs_buffer *buffer = file->private_data;
ssize_t retval = 0;

mutex_lock(&buffer->mutex);
if (buffer->needs_read_fill) {
if ((retval = fill_read_buffer(file->f_path.dentry,buffer)))
retval = fill_read_buffer(file, buffer);
if (retval)
goto out;
}
pr_debug("%s: count = %zd, ppos = %lld, buf = %s\n",
@@ -138,10 +143,8 @@ static ssize_t
configfs_read_bin_file(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct configfs_fragment *frag = to_frag(file);
struct configfs_buffer *buffer = file->private_data;
struct dentry *dentry = file->f_path.dentry;
struct config_item *item = to_item(dentry->d_parent);
struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
ssize_t retval = 0;
ssize_t len = min_t(size_t, count, PAGE_SIZE);

@@ -156,14 +159,19 @@ configfs_read_bin_file(struct file *file, char __user *buf,

if (buffer->needs_read_fill) {
/* perform first read with buf == NULL to get extent */
len = bin_attr->read(item, NULL, 0);
down_read(&frag->frag_sem);
if (!frag->frag_dead)
len = buffer->bin_attr->read(buffer->item, NULL, 0);
else
len = -ENOENT;
up_read(&frag->frag_sem);
if (len <= 0) {
retval = len;
goto out;
}

/* do not exceed the maximum value */
if (bin_attr->cb_max_size && len > bin_attr->cb_max_size) {
if (buffer->cb_max_size && len > buffer->cb_max_size) {
retval = -EFBIG;
goto out;
}
@@ -176,7 +184,13 @@ configfs_read_bin_file(struct file *file, char __user *buf,
buffer->bin_buffer_size = len;

/* perform second read to fill buffer */
len = bin_attr->read(item, buffer->bin_buffer, len);
down_read(&frag->frag_sem);
if (!frag->frag_dead)
len = buffer->bin_attr->read(buffer->item,
buffer->bin_buffer, len);
else
len = -ENOENT;
up_read(&frag->frag_sem);
if (len < 0) {
retval = len;
vfree(buffer->bin_buffer);
@@ -226,25 +240,17 @@ fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size
return error ? -EFAULT : count;
}


/**
* flush_write_buffer - push buffer to config_item.
* @dentry: dentry to the attribute
* @buffer: data buffer for file.
* @count: number of bytes
*
* Get the correct pointers for the config_item and the attribute we're
* dealing with, then call the store() method for the attribute,
* passing the buffer that we acquired in fill_write_buffer().
*/

static int
flush_write_buffer(struct dentry * dentry, struct configfs_buffer * buffer, size_t count)
flush_write_buffer(struct file *file, struct configfs_buffer *buffer, size_t count)
{
struct configfs_attribute * attr = to_attr(dentry);
struct config_item * item = to_item(dentry->d_parent);
struct configfs_fragment *frag = to_frag(file);
int res = -ENOENT;

return attr->store(item, buffer->page, count);
down_read(&frag->frag_sem);
if (!frag->frag_dead)
res = buffer->attr->store(buffer->item, buffer->page, count);
up_read(&frag->frag_sem);
return res;
}


@@ -268,13 +274,13 @@ flush_write_buffer(struct dentry * dentry, struct configfs_buffer * buffer, size
static ssize_t
configfs_write_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
struct configfs_buffer * buffer = file->private_data;
struct configfs_buffer *buffer = file->private_data;
ssize_t len;

mutex_lock(&buffer->mutex);
len = fill_write_buffer(buffer, buf, count);
if (len > 0)
len = flush_write_buffer(file->f_path.dentry, buffer, len);
len = flush_write_buffer(file, buffer, len);
if (len > 0)
*ppos += len;
mutex_unlock(&buffer->mutex);
@@ -299,8 +305,6 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct configfs_buffer *buffer = file->private_data;
struct dentry *dentry = file->f_path.dentry;
struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
void *tbuf = NULL;
ssize_t len;

@@ -316,8 +320,8 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
/* buffer grows? */
if (*ppos + count > buffer->bin_buffer_size) {

if (bin_attr->cb_max_size &&
*ppos + count > bin_attr->cb_max_size) {
if (buffer->cb_max_size &&
*ppos + count > buffer->cb_max_size) {
len = -EFBIG;
goto out;
}
@@ -349,31 +353,51 @@ out:
return len;
}

static int check_perm(struct inode * inode, struct file * file, int type)
static int __configfs_open_file(struct inode *inode, struct file *file, int type)
{
struct config_item *item = configfs_get_config_item(file->f_path.dentry->d_parent);
struct configfs_attribute * attr = to_attr(file->f_path.dentry);
struct configfs_bin_attribute *bin_attr = NULL;
struct configfs_buffer * buffer;
struct configfs_item_operations * ops = NULL;
int error = 0;
struct dentry *dentry = file->f_path.dentry;
struct configfs_fragment *frag = to_frag(file);
struct configfs_attribute *attr;
struct configfs_buffer *buffer;
int error;

if (!item || !attr)
goto Einval;
error = -ENOMEM;
buffer = kzalloc(sizeof(struct configfs_buffer), GFP_KERNEL);
if (!buffer)
goto out;

if (type & CONFIGFS_ITEM_BIN_ATTR)
bin_attr = to_bin_attr(file->f_path.dentry);
error = -ENOENT;
down_read(&frag->frag_sem);
if (unlikely(frag->frag_dead))
goto out_free_buffer;

/* Grab the module reference for this attribute if we have one */
if (!try_module_get(attr->ca_owner)) {
error = -ENODEV;
goto Done;
error = -EINVAL;
buffer->item = to_item(dentry->d_parent);
if (!buffer->item)
goto out_free_buffer;

attr = to_attr(dentry);
if (!attr)
goto out_put_item;

if (type & CONFIGFS_ITEM_BIN_ATTR) {
buffer->bin_attr = to_bin_attr(dentry);
buffer->cb_max_size = buffer->bin_attr->cb_max_size;
} else {
buffer->attr = attr;
}

if (item->ci_type)
ops = item->ci_type->ct_item_ops;
else
goto Eaccess;
buffer->owner = attr->ca_owner;
/* Grab the module reference for this attribute if we have one */
error = -ENODEV;
if (!try_module_get(buffer->owner))
goto out_put_item;

error = -EACCES;
if (!buffer->item->ci_type)
goto out_put_module;

buffer->ops = buffer->item->ci_type->ct_item_ops;

/* File needs write support.
* The inode's perms must say it's ok,
@@ -381,13 +405,11 @@ static int check_perm(struct inode * inode, struct file * file, int type)
*/
if (file->f_mode & FMODE_WRITE) {
if (!(inode->i_mode & S_IWUGO))
goto Eaccess;

goto out_put_module;
if ((type & CONFIGFS_ITEM_ATTR) && !attr->store)
goto Eaccess;

if ((type & CONFIGFS_ITEM_BIN_ATTR) && !bin_attr->write)
goto Eaccess;
goto out_put_module;
if ((type & CONFIGFS_ITEM_BIN_ATTR) && !buffer->bin_attr->write)
goto out_put_module;
}

/* File needs read support.
@@ -396,92 +418,72 @@ static int check_perm(struct inode * inode, struct file * file, int type)
*/
if (file->f_mode & FMODE_READ) {
if (!(inode->i_mode & S_IRUGO))
goto Eaccess;

goto out_put_module;
if ((type & CONFIGFS_ITEM_ATTR) && !attr->show)
goto Eaccess;

if ((type & CONFIGFS_ITEM_BIN_ATTR) && !bin_attr->read)
goto Eaccess;
goto out_put_module;
if ((type & CONFIGFS_ITEM_BIN_ATTR) && !buffer->bin_attr->read)
goto out_put_module;
}

/* No error? Great, allocate a buffer for the file, and store it
* it in file->private_data for easy access.
*/
buffer = kzalloc(sizeof(struct configfs_buffer),GFP_KERNEL);
if (!buffer) {
error = -ENOMEM;
goto Enomem;
}
mutex_init(&buffer->mutex);
buffer->needs_read_fill = 1;
buffer->read_in_progress = false;
buffer->write_in_progress = false;
buffer->ops = ops;
file->private_data = buffer;
goto Done;
up_read(&frag->frag_sem);
return 0;

Einval:
error = -EINVAL;
goto Done;
Eaccess:
error = -EACCES;
Enomem:
module_put(attr->ca_owner);
Done:
if (error && item)
config_item_put(item);
out_put_module:
module_put(buffer->owner);
out_put_item:
config_item_put(buffer->item);
out_free_buffer:
up_read(&frag->frag_sem);
kfree(buffer);
out:
return error;
}

static int configfs_release(struct inode *inode, struct file *filp)
{
struct config_item * item = to_item(filp->f_path.dentry->d_parent);
struct configfs_attribute * attr = to_attr(filp->f_path.dentry);
struct module * owner = attr->ca_owner;
struct configfs_buffer * buffer = filp->private_data;
struct configfs_buffer *buffer = filp->private_data;

if (item)
config_item_put(item);
/* After this point, attr should not be accessed. */
module_put(owner);

if (buffer) {
if (buffer->page)
free_page((unsigned long)buffer->page);
mutex_destroy(&buffer->mutex);
kfree(buffer);
}
module_put(buffer->owner);
if (buffer->page)
free_page((unsigned long)buffer->page);
mutex_destroy(&buffer->mutex);
kfree(buffer);
return 0;
}

static int configfs_open_file(struct inode *inode, struct file *filp)
{
return check_perm(inode, filp, CONFIGFS_ITEM_ATTR);
return __configfs_open_file(inode, filp, CONFIGFS_ITEM_ATTR);
}

static int configfs_open_bin_file(struct inode *inode, struct file *filp)
{
return check_perm(inode, filp, CONFIGFS_ITEM_BIN_ATTR);
return __configfs_open_file(inode, filp, CONFIGFS_ITEM_BIN_ATTR);
}

static int configfs_release_bin_file(struct inode *inode, struct file *filp)
static int configfs_release_bin_file(struct inode *inode, struct file *file)
{
struct configfs_buffer *buffer = filp->private_data;
struct dentry *dentry = filp->f_path.dentry;
struct config_item *item = to_item(dentry->d_parent);
struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
ssize_t len = 0;
int ret;
struct configfs_buffer *buffer = file->private_data;

buffer->read_in_progress = false;

if (buffer->write_in_progress) {
struct configfs_fragment *frag = to_frag(file);
buffer->write_in_progress = false;

len = bin_attr->write(item, buffer->bin_buffer,
buffer->bin_buffer_size);

down_read(&frag->frag_sem);
if (!frag->frag_dead) {
/* result of ->release() is ignored */
buffer->bin_attr->write(buffer->item,
buffer->bin_buffer,
buffer->bin_buffer_size);
}
up_read(&frag->frag_sem);
/* vfree on NULL is safe */
vfree(buffer->bin_buffer);
buffer->bin_buffer = NULL;
@@ -489,10 +491,8 @@ static int configfs_release_bin_file(struct inode *inode, struct file *filp)
buffer->needs_read_fill = 1;
}

ret = configfs_release(inode, filp);
if (len < 0)
return len;
return ret;
configfs_release(inode, file);
return 0;
}


@@ -527,7 +527,7 @@ int configfs_create_file(struct config_item * item, const struct configfs_attrib

inode_lock_nested(d_inode(dir), I_MUTEX_NORMAL);
error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode,
CONFIGFS_ITEM_ATTR);
CONFIGFS_ITEM_ATTR, parent_sd->s_frag);
inode_unlock(d_inode(dir));

return error;
@@ -549,7 +549,7 @@ int configfs_create_bin_file(struct config_item *item,

inode_lock_nested(dir->d_inode, I_MUTEX_NORMAL);
error = configfs_make_dirent(parent_sd, NULL, (void *) bin_attr, mode,
CONFIGFS_ITEM_BIN_ATTR);
CONFIGFS_ITEM_BIN_ATTR, parent_sd->s_frag);
inode_unlock(dir->d_inode);

return error;

@@ -1403,11 +1403,12 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
return 0;

/* No fileid? Just exit */
if (!(fattr->valid & NFS_ATTR_FATTR_FILEID))
return 0;
if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) {
/* Only a mounted-on-fileid? Just exit */
if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
return 0;
/* Has the inode gone and changed behind our back? */
if (nfsi->fileid != fattr->fileid) {
} else if (nfsi->fileid != fattr->fileid) {
/* Is this perhaps the mounted-on fileid? */
if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
nfsi->fileid == fattr->mounted_on_fileid)
@@ -1807,11 +1808,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
nfs_display_fhandle_hash(NFS_FH(inode)),
atomic_read(&inode->i_count), fattr->valid);

/* No fileid? Just exit */
if (!(fattr->valid & NFS_ATTR_FATTR_FILEID))
return 0;
if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) {
/* Only a mounted-on-fileid? Just exit */
if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
return 0;
/* Has the inode gone and changed behind our back? */
if (nfsi->fileid != fattr->fileid) {
} else if (nfsi->fileid != fattr->fileid) {
/* Is this perhaps the mounted-on fileid? */
if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
nfsi->fileid == fattr->mounted_on_fileid)

@@ -24,7 +24,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
long ______r; \
static struct ftrace_likely_data \
__aligned(4) \
__section("_ftrace_annotated_branch") \
__section(_ftrace_annotated_branch) \
______f = { \
.data.func = __func__, \
.data.file = __FILE__, \
@@ -60,7 +60,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#define __trace_if_value(cond) ({ \
static struct ftrace_branch_data \
__aligned(4) \
__section("_ftrace_branch") \
__section(_ftrace_branch) \
__if_trace = { \
.func = __func__, \
.file = __FILE__, \
@@ -118,7 +118,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
".popsection\n\t"

/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(".rodata..c_jump_table")
#define __annotate_jump_table __section(.rodata..c_jump_table)

#else
#define annotate_reachable()
@@ -298,7 +298,7 @@ unsigned long read_word_at_a_time(const void *addr)
* visible to the compiler.
*/
#define __ADDRESSABLE(sym) \
static void * __section(".discard.addressable") __used \
static void * __section(.discard.addressable) __used \
__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;

/**

@@ -48,7 +48,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0618", 0 },
{ "ELAN0619", 0 },
{ "ELAN061A", 0 },
{ "ELAN061B", 0 },
/* { "ELAN061B", 0 }, not working on the Lenovo Legion Y7000 */
{ "ELAN061C", 0 },
{ "ELAN061D", 0 },
{ "ELAN061E", 0 },

@@ -346,7 +346,6 @@ enum {
#define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1))

#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
#define QI_EIOTLB_GL(gl) (((u64)gl) << 7)
#define QI_EIOTLB_IH(ih) (((u64)ih) << 6)
#define QI_EIOTLB_AM(am) (((u64)am))
#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32)
@@ -378,8 +377,6 @@ enum {
#define QI_RESP_INVALID 0x1
#define QI_RESP_FAILURE 0xf

#define QI_GRAN_ALL_ALL 0
#define QI_GRAN_NONG_ALL 1
#define QI_GRAN_NONG_PASID 2
#define QI_GRAN_PSI_PASID 3

@@ -5105,37 +5105,40 @@ out_unlock:
return retval;
}

static int sched_read_attr(struct sched_attr __user *uattr,
struct sched_attr *attr,
unsigned int usize)
/*
* Copy the kernel size attribute structure (which might be larger
* than what user-space knows about) to user-space.
*
* Note that all cases are valid: user-space buffer can be larger or
* smaller than the kernel-space buffer. The usual case is that both
* have the same size.
*/
static int
sched_attr_copy_to_user(struct sched_attr __user *uattr,
struct sched_attr *kattr,
unsigned int usize)
{
int ret;
unsigned int ksize = sizeof(*kattr);

if (!access_ok(uattr, usize))
return -EFAULT;

/*
* If we're handed a smaller struct than we know of,
* ensure all the unknown bits are 0 - i.e. old
* user-space does not get uncomplete information.
* sched_getattr() ABI forwards and backwards compatibility:
*
* If usize == ksize then we just copy everything to user-space and all is good.
*
* If usize < ksize then we only copy as much as user-space has space for,
* this keeps ABI compatibility as well. We skip the rest.
*
* If usize > ksize then user-space is using a newer version of the ABI,
* which part the kernel doesn't know about. Just ignore it - tooling can
* detect the kernel's knowledge of attributes from the attr->size value
* which is set to ksize in this case.
*/
if (usize < sizeof(*attr)) {
unsigned char *addr;
unsigned char *end;
kattr->size = min(usize, ksize);

addr = (void *)attr + usize;
end = (void *)attr + sizeof(*attr);

for (; addr < end; addr++) {
if (*addr)
return -EFBIG;
}

attr->size = usize;
}

ret = copy_to_user(uattr, attr, attr->size);
if (ret)
if (copy_to_user(uattr, kattr, kattr->size))
return -EFAULT;

return 0;
@@ -5145,20 +5148,18 @@ static int sched_read_attr(struct sched_attr __user *uattr,
* sys_sched_getattr - similar to sched_getparam, but with sched_attr
* @pid: the pid in question.
* @uattr: structure containing the extended parameters.
* @size: sizeof(attr) for fwd/bwd comp.
* @usize: sizeof(attr) that user-space knows about, for forwards and backwards compatibility.
* @flags: for future extension.
*/
SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
unsigned int, size, unsigned int, flags)
unsigned int, usize, unsigned int, flags)
{
struct sched_attr attr = {
.size = sizeof(struct sched_attr),
};
struct sched_attr kattr = { };
struct task_struct *p;
int retval;

if (!uattr || pid < 0 || size > PAGE_SIZE ||
size < SCHED_ATTR_SIZE_VER0 || flags)
if (!uattr || pid < 0 || usize > PAGE_SIZE ||
usize < SCHED_ATTR_SIZE_VER0 || flags)
return -EINVAL;

rcu_read_lock();
@@ -5171,25 +5172,24 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
if (retval)
goto out_unlock;

attr.sched_policy = p->policy;
kattr.sched_policy = p->policy;
if (p->sched_reset_on_fork)
attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
if (task_has_dl_policy(p))
__getparam_dl(p, &attr);
__getparam_dl(p, &kattr);
else if (task_has_rt_policy(p))
attr.sched_priority = p->rt_priority;
kattr.sched_priority = p->rt_priority;
else
attr.sched_nice = task_nice(p);
kattr.sched_nice = task_nice(p);

#ifdef CONFIG_UCLAMP_TASK
attr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
attr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
#endif

rcu_read_unlock();

retval = sched_read_attr(uattr, &attr, size);
return retval;
return sched_attr_copy_to_user(uattr, &kattr, usize);

out_unlock:
rcu_read_unlock();

@@ -4470,6 +4470,8 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
if (likely(cfs_rq->runtime_remaining > 0))
return;

if (cfs_rq->throttled)
return;
/*
* if we're unable to extend our runtime we resched so that the active
* hierarchy can be throttled
@@ -4673,6 +4675,9 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
if (!cfs_rq_throttled(cfs_rq))
goto next;

/* By the above check, this should never be true */
SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);

runtime = -cfs_rq->runtime_remaining + 1;
if (runtime > remaining)
runtime = remaining;

@@ -124,7 +124,8 @@ EXPORT_SYMBOL_GPL(balloon_page_list_dequeue);
struct page *balloon_page_alloc(void)
{
struct page *page = alloc_page(balloon_mapping_gfp_mask() |
__GFP_NOMEMALLOC | __GFP_NORETRY);
__GFP_NOMEMALLOC | __GFP_NORETRY |
__GFP_NOWARN);
return page;
}
EXPORT_SYMBOL_GPL(balloon_page_alloc);

@@ -66,6 +66,9 @@ static void request_key_auth_describe(const struct key *key,
{
struct request_key_auth *rka = dereference_key_rcu(key);

if (!rka)
return;

seq_puts(m, "key:");
seq_puts(m, key->description);
if (key_is_positive(key))
@@ -83,6 +86,9 @@ static long request_key_auth_read(const struct key *key,
size_t datalen;
long ret;

if (!rka)
return -EKEYREVOKED;

datalen = rka->callout_len;
ret = datalen;

@@ -824,6 +824,8 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
while (id >= 0) {
const struct hda_fixup *fix = codec->fixup_list + id;

if (++depth > 10)
break;
if (fix->chained_before)
apply_fixup(codec, fix->chain_id, action, depth + 1);

@@ -863,8 +865,6 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
}
if (!fix->chained || fix->chained_before)
break;
if (++depth > 10)
break;
id = fix->chain_id;
}
}

@@ -6009,7 +6009,8 @@ int snd_hda_gen_init(struct hda_codec *codec)
if (spec->init_hook)
spec->init_hook(codec);

snd_hda_apply_verbs(codec);
if (!spec->skip_verbs)
snd_hda_apply_verbs(codec);

init_multi_out(codec);
init_extra_out(codec);

@@ -243,6 +243,7 @@ struct hda_gen_spec {
unsigned int indep_hp_enabled:1; /* independent HP enabled */
unsigned int have_aamix_ctl:1;
unsigned int hp_mic_jack_modes:1;
unsigned int skip_verbs:1; /* don't apply verbs at snd_hda_gen_init() */

/* additional mute flags (only effective with auto_mute_via_amp=1) */
u64 mute_bits;

@@ -837,9 +837,11 @@ static int alc_init(struct hda_codec *codec)
if (spec->init_hook)
spec->init_hook(codec);

spec->gen.skip_verbs = 1; /* applied in below */
snd_hda_gen_init(codec);
alc_fix_pll(codec);
alc_auto_init_amp(codec, spec->init_amp);
snd_hda_apply_verbs(codec); /* apply verbs here after own init */

snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);

@@ -5797,6 +5799,7 @@ enum {
ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
ALC299_FIXUP_PREDATOR_SPK,
ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
};

static const struct hda_fixup alc269_fixups[] = {
@@ -6837,6 +6840,16 @@ static const struct hda_fixup alc269_fixups[] = {
{ }
}
},
[ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
{ 0x14, 0x411111f0 }, /* disable confusing internal speaker */
{ 0x19, 0x04a11150 }, /* use as headset mic, without its own jack detect */
{ }
},
.chained = true,
.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
},
};

static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6979,6 +6992,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -6995,6 +7009,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
@@ -7072,6 +7087,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -8946,6 +8962,7 @@ static int patch_alc680(struct hda_codec *codec)
static const struct hda_device_id snd_hda_id_realtek[] = {
HDA_CODEC_ENTRY(0x10ec0215, "ALC215", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0222, "ALC222", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),