Merge greybus driver tree into 4.8-rc6

This pulls the external greybus driver tree into 4.8-rc6 as it should be
part of the main kernel tree and not live outside in some lonely github
repo, never to be reunited with its true love...

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Greg Kroah-Hartman 2016-09-19 12:29:33 +02:00
commit f44dd18463
148 changed files with 45533 additions and 0 deletions

15
drivers/staging/greybus/.gitignore vendored Normal file
View File

@ -0,0 +1,15 @@
.*
*.cmd
*.ko
*.mod.c
modules.order
Module.symvers
*.o
*.o.*
*.swp
.tmp_versions
tags
cscope.*
ncscope.*
*.patch
tools/loopback_test

View File

@ -0,0 +1,70 @@
/* ES1 AP Bridge Chip USB descriptor definitions */
static const u8 es1_dev_descriptor[] = {
	0x12,		/* __u8  bLength; 18 bytes, matches the array size */
	0x01,		/* __u8  bDescriptorType; Device */
	0x00, 0x02,	/* __le16 bcdUSB v2.0 (comma was missing: syntax error) */
	0x00,		/* __u8  bDeviceClass */
	/* duplicated bDeviceClass byte removed: it made the array 19 bytes
	 * while bLength above declares 18 */
	0x00,		/* __u8  bDeviceSubClass; */
	0x00,		/* __u8  bDeviceProtocol; */
	0x40,		/* __u8  bMaxPacketSize0; 0x40 = 64 bytes */
	0xff, 0xff,	/* __le16 idVendor; 0xffff made up for now */
	0x01, 0x00,	/* __le16 idProduct; 0x0001 made up for now */
	0x01, 0x00,	/* __le16 bcdDevice; ES1 */
	0x03,		/* __u8  iManufacturer; */
	0x02,		/* __u8  iProduct; */
	0x01,		/* __u8  iSerialNumber; */
	0x01		/* __u8  bNumConfigurations; */
};
/*
 * Configuration descriptor set for the ES1 bridge: one configuration with
 * one vendor-specific interface exposing three endpoints (interrupt IN,
 * bulk IN, bulk OUT).
 */
static const u8 es1_config_descriptor[] = {
	/* one configuration */
	0x09,		/* __u8  bLength; */
	0x02,		/* __u8  bDescriptorType; Configuration */
	0x27, 0x00,	/* __le16 wTotalLength; 9 + 9 + 3*7 = 39 bytes
			 * (was 0x19 = 25, which only covers one endpoint) */
	0x01,		/* __u8  bNumInterfaces; (1) */
	0x01,		/* __u8  bConfigurationValue; */
	0x00,		/* __u8  iConfiguration; */
	0xc0,		/* __u8  bmAttributes;
				Bit 7: must be set,
				    6: Self-powered,
				    5: Remote wakeup,
				 4..0: resvd */
	0x00,		/* __u8  MaxPower; */

	/* one interface */
	0x09,		/* __u8  if_bLength; */
	0x04,		/* __u8  if_bDescriptorType; Interface */
	0x00,		/* __u8  if_bInterfaceNumber; */
	0x00,		/* __u8  if_bAlternateSetting; */
	0x03,		/* __u8  if_bNumEndpoints; */
	0xff,		/* __u8  if_bInterfaceClass; Vendor-specific */
	0xff,		/* __u8  if_bInterfaceSubClass; Vendor-specific */
	0xff,		/* __u8  if_bInterfaceProtocol; Vendor-specific */
	0x00,		/* __u8  if_iInterface; */

	/* three endpoints */
	0x07,		/* __u8  ep_bLength; */
	0x05,		/* __u8  ep_bDescriptorType; Endpoint */
	0x81,		/* __u8  ep_bEndpointAddress; IN Endpoint 1 */
	0x03,		/* __u8  ep_bmAttributes; Interrupt */
	0x00, 0x04,	/* __le16 ep_wMaxPacketSize; 1024 */
	0x40,		/* __u8  ep_bInterval; 64ms */

	0x07,		/* __u8  ep_bLength; */
	0x05,		/* __u8  ep_bDescriptorType; Endpoint */
	0x82,		/* __u8  ep_bEndpointAddress; IN Endpoint 2 */
	0x02,		/* __u8  ep_bmAttributes; Bulk */
	0x00, 0x04,	/* __le16 ep_wMaxPacketSize; 1024 */
	0x40,		/* __u8  ep_bInterval; (comma was missing: syntax error) */

	0x07,		/* __u8  ep_bLength; */
	0x05,		/* __u8  ep_bDescriptorType; Endpoint */
	0x02,		/* __u8  ep_bEndpointAddress; Out Endpoint 2 */
	0x02,		/* __u8  ep_bmAttributes; Bulk */
	0x00, 0x04,	/* __le16 ep_wMaxPacketSize; 1024 */
	0x40		/* __u8  ep_bInterval; */
};

View File

@ -0,0 +1,139 @@
/*
* Sample code to test CAP protocol
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2016 Google Inc. All rights reserved.
* Copyright(c) 2016 Linaro Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details.
*
* BSD LICENSE
*
* Copyright(c) 2016 Google Inc. All rights reserved.
* Copyright(c) 2016 Linaro Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. or Linaro Ltd. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
* LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "../../greybus_authentication.h"
/* Shared ioctl argument buffers for the CAP character device. */

/* Filled by CAP_IOC_GET_ENDPOINT_UID with the 8-byte endpoint UID. */
struct cap_ioc_get_endpoint_uid uid;

/* Request for CAP_IOC_GET_IMS_CERTIFICATE; class/id left at 0 for this
 * test — presumably 0 selects a default certificate, TODO confirm
 * against the CAP protocol spec. */
struct cap_ioc_get_ims_certificate cert = {
.certificate_class = 0,
.certificate_id = 0,
};

/* Request for CAP_IOC_AUTHENTICATE; an all-zero challenge is used for
 * testing.  The uid field is copied in from the UID reply in main(). */
struct cap_ioc_authenticate authenticate = {
.auth_type = 0,
.challenge = {0},
};
/*
 * Exercise the Greybus Component Authentication Protocol (CAP) char
 * device: fetch the endpoint UID, fetch an IMS certificate, then issue
 * an authenticate request using that UID.
 *
 * argv[1]: path of the gb-authenticate-X character device.
 * Returns 0 on success or bad usage, -1 on open/ioctl failure.
 */
int main(int argc, char *argv[])
{
	char *capdev;
	int fd, ret;

	/* Make sure arguments are correct */
	if (argc != 2) {
		/* was "./firmware": copy-paste from the firmware test app */
		printf("\nUsage: ./authenticate <Path of the gb-authenticate-X dev>\n");
		return 0;
	}

	capdev = argv[1];

	printf("Opening %s authentication device\n", capdev);

	fd = open(capdev, O_RDWR);
	if (fd < 0) {
		printf("Failed to open: %s\n", capdev);
		return -1;
	}

	/* Get UID */
	printf("Get UID\n");
	ret = ioctl(fd, CAP_IOC_GET_ENDPOINT_UID, &uid);
	if (ret < 0) {
		printf("Failed to get UID: %s (%d)\n", capdev, ret);
		ret = -1;
		goto close_fd;
	}

	printf("UID received: 0x%llx\n", *(long long unsigned int *)(uid.uid));

	/* Get certificate */
	printf("Get IMS certificate\n");
	ret = ioctl(fd, CAP_IOC_GET_IMS_CERTIFICATE, &cert);
	if (ret < 0) {
		printf("Failed to get IMS certificate: %s (%d)\n", capdev, ret);
		ret = -1;
		goto close_fd;
	}

	printf("IMS Certificate size: %d\n", cert.cert_size);

	/* Authenticate: echo back the UID we just received */
	printf("Authenticate module\n");
	memcpy(authenticate.uid, uid.uid, 8);

	ret = ioctl(fd, CAP_IOC_AUTHENTICATE, &authenticate);
	if (ret < 0) {
		printf("Failed to authenticate module: %s (%d)\n", capdev, ret);
		ret = -1;
		goto close_fd;
	}

	printf("Authenticated, result (%02x), sig-size (%02x)\n",
		authenticate.result_code, authenticate.signature_size);

close_fd:
	close(fd);

	return ret;
}

View File

@ -0,0 +1,333 @@
Firmware Management
-------------------
Copyright 2016 Google Inc.
Copyright 2016 Linaro Ltd.
Interface-Manifest
------------------
All firmware packages on the Modules or Interfaces are managed by a special
Firmware Management Protocol. To support Firmware Management by the AP, the
Interface Manifest shall at least contain the Firmware Management Bundle and a
Firmware Management Protocol CPort within it.
The bundle may contain additional CPorts based on the extra functionality
required to manage firmware packages.
For example, this is how the Firmware Management part of the Interface Manifest
may look like:
; Firmware Management Bundle (Bundle 1):
[bundle-descriptor 1]
class = 0x16
; (Mandatory) Firmware Management Protocol on CPort 1
[cport-descriptor 2]
bundle = 1
protocol = 0x18
; (Optional) Firmware Download Protocol on CPort 2
[cport-descriptor 1]
bundle = 1
protocol = 0x17
; (Optional) SPI protocol on CPort 3
[cport-descriptor 3]
bundle = 1
protocol = 0x0b
; (Optional) Component Authentication Protocol (CAP) on CPort 4
[cport-descriptor 4]
bundle = 1
protocol = 0x19
Sysfs Interfaces - Firmware Management
--------------------------------------
The Firmware Management Protocol interacts with Userspace using the character
device interface. The character device will be present in /dev/ directory
and will be named gb-fw-mgmt-<N>. The number <N> is assigned at runtime.
Identifying the Character Device
================================
There can be multiple devices present in /dev/ directory with name gb-fw-mgmt-N
and user first needs to identify the character device used for
firmware-management for a particular interface.
The Firmware Management core creates a device of class 'gb_fw_mgmt', which shall
be used by the user to identify the right character device for it. The class
device is created within the Bundle directory for a particular Interface.
For example this is how the class-device can be present:
/sys/bus/greybus/devices/1-1/1-1.1/1-1.1.1/gb_fw_mgmt/gb-fw-mgmt-0
The last name in this path: gb-fw-mgmt-0 is precisely the name of the char
device and so the device in this case will be:
/dev/gb-fw-mgmt-0.
Operations on the Char device
=============================
The Character device (gb-fw-mgmt-0 in example) can be opened by the userspace
application and it can perform various 'ioctl' operations on the device. The
device doesn't support any read/write operations.
Following are the IOCTLs and their data structures available to the user:
/* IOCTL support */
#define GB_FW_LOAD_METHOD_UNIPRO 0x01
#define GB_FW_LOAD_METHOD_INTERNAL 0x02
#define GB_FW_LOAD_STATUS_FAILED 0x00
#define GB_FW_LOAD_STATUS_UNVALIDATED 0x01
#define GB_FW_LOAD_STATUS_VALIDATED 0x02
#define GB_FW_LOAD_STATUS_VALIDATION_FAILED 0x03
#define GB_FW_BACKEND_FW_STATUS_SUCCESS 0x01
#define GB_FW_BACKEND_FW_STATUS_FAIL_FIND 0x02
#define GB_FW_BACKEND_FW_STATUS_FAIL_FETCH 0x03
#define GB_FW_BACKEND_FW_STATUS_FAIL_WRITE 0x04
#define GB_FW_BACKEND_FW_STATUS_INT 0x05
#define GB_FW_BACKEND_FW_STATUS_RETRY 0x06
#define GB_FW_BACKEND_FW_STATUS_NOT_SUPPORTED 0x07
#define GB_FW_BACKEND_VERSION_STATUS_SUCCESS 0x01
#define GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE 0x02
#define GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED 0x03
#define GB_FW_BACKEND_VERSION_STATUS_RETRY 0x04
#define GB_FW_BACKEND_VERSION_STATUS_FAIL_INT 0x05
struct fw_mgmt_ioc_get_intf_version {
__u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
__u16 major;
__u16 minor;
} __attribute__ ((__packed__));
struct fw_mgmt_ioc_get_backend_version {
__u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
__u16 major;
__u16 minor;
__u8 status;
} __attribute__ ((__packed__));
struct fw_mgmt_ioc_intf_load_and_validate {
__u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
__u8 load_method;
__u8 status;
__u16 major;
__u16 minor;
} __packed;
struct fw_mgmt_ioc_backend_fw_update {
__u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
__u8 status;
} __packed;
#define FW_MGMT_IOCTL_BASE 'S'
#define FW_MGMT_IOC_GET_INTF_FW _IOR(FW_MGMT_IOCTL_BASE, 0, struct fw_mgmt_ioc_get_intf_version)
#define FW_MGMT_IOC_GET_BACKEND_FW _IOWR(FW_MGMT_IOCTL_BASE, 1, struct fw_mgmt_ioc_get_backend_version)
#define FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE _IOWR(FW_MGMT_IOCTL_BASE, 2, struct fw_mgmt_ioc_intf_load_and_validate)
#define FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE _IOWR(FW_MGMT_IOCTL_BASE, 3, struct fw_mgmt_ioc_backend_fw_update)
#define FW_MGMT_IOC_SET_TIMEOUT_MS _IOW(FW_MGMT_IOCTL_BASE, 4, unsigned int)
#define FW_MGMT_IOC_MODE_SWITCH _IO(FW_MGMT_IOCTL_BASE, 5)
1. FW_MGMT_IOC_GET_INTF_FW:
This ioctl shall be used by the user to get the version and firmware-tag of
the currently running Interface Firmware. All the fields of the 'struct
fw_mgmt_ioc_get_intf_version' are filled by the kernel.
2. FW_MGMT_IOC_GET_BACKEND_FW:
This ioctl shall be used by the user to get the version of a currently
running Backend Interface Firmware identified by a firmware-tag. The user is
required to fill the 'firmware_tag' field of the 'struct fw_mgmt_ioc_get_backend_version'
in this case. The 'major' and 'minor' fields are set by the kernel in
response.
3. FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE:
This ioctl shall be used by the user to load an Interface Firmware package on
an Interface. The user needs to fill the 'firmware_tag' and 'load_method'
fields of the 'struct fw_mgmt_ioc_intf_load_and_validate'. The 'status',
'major' and 'minor' fields are set by the kernel in response.
4. FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE:
This ioctl shall be used by the user to request an Interface to update a
Backend Interface Firmware. The user is required to fill the 'firmware_tag'
field of the 'struct fw_mgmt_ioc_backend_fw_update' in this case. The 'status' field is
set by the kernel in response.
5. FW_MGMT_IOC_SET_TIMEOUT_MS:
This ioctl shall be used by the user to increase the timeout interval within
which the firmware must get loaded by the Module. The default timeout is 1
second. The user needs to pass the timeout in milliseconds.
6. FW_MGMT_IOC_MODE_SWITCH:
This ioctl shall be used by the user to mode-switch the module to the
previously loaded interface firmware. If the interface firmware isn't loaded
previously, or if another unsuccessful FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE
operation is started after loading interface firmware, then the firmware core
wouldn't allow mode-switch.
Sysfs Interfaces - Authentication
---------------------------------
The Component Authentication Protocol interacts with Userspace using the
character device interface. The character device will be present in /dev/
directory and will be named gb-authenticate-<N>. The number <N> is assigned at
runtime.
Identifying the Character Device
================================
There can be multiple devices present in /dev/ directory with name
gb-authenticate-N and user first needs to identify the character device used for
authentication of a particular interface.
The Authentication core creates a device of class 'gb_authenticate', which shall
be used by the user to identify the right character device for it. The class
device is created within the Bundle directory for a particular Interface.
For example this is how the class-device can be present:
/sys/bus/greybus/devices/1-1/1-1.1/1-1.1.1/gb_authenticate/gb-authenticate-0
The last name in this path: gb-authenticate-0 is precisely the name of the char
device and so the device in this case will be:
/dev/gb-authenticate-0.
Operations on the Char device
=============================
The Character device (/dev/gb-authenticate-0 in above example) can be opened by
the userspace application and it can perform various 'ioctl' operations on the
device. The device doesn't support any read/write operations.
Following are the IOCTLs and their data structures available to the user:
#define CAP_CERTIFICATE_MAX_SIZE 1600
#define CAP_SIGNATURE_MAX_SIZE 320
/* Certificate class types */
#define CAP_CERT_IMS_EAPC 0x00000001
#define CAP_CERT_IMS_EASC 0x00000002
#define CAP_CERT_IMS_EARC 0x00000003
#define CAP_CERT_IMS_IAPC 0x00000004
#define CAP_CERT_IMS_IASC 0x00000005
#define CAP_CERT_IMS_IARC 0x00000006
/* IMS Certificate response result codes */
#define CAP_IMS_RESULT_CERT_FOUND 0x00
#define CAP_IMS_RESULT_CERT_CLASS_INVAL 0x01
#define CAP_IMS_RESULT_CERT_CORRUPT 0x02
#define CAP_IMS_RESULT_CERT_NOT_FOUND 0x03
/* Authentication types */
#define CAP_AUTH_IMS_PRI 0x00000001
#define CAP_AUTH_IMS_SEC 0x00000002
#define CAP_AUTH_IMS_RSA 0x00000003
/* Authenticate response result codes */
#define CAP_AUTH_RESULT_CR_SUCCESS 0x00
#define CAP_AUTH_RESULT_CR_BAD_TYPE 0x01
#define CAP_AUTH_RESULT_CR_WRONG_EP 0x02
#define CAP_AUTH_RESULT_CR_NO_KEY 0x03
#define CAP_AUTH_RESULT_CR_SIG_FAIL 0x04
/* IOCTL support */
struct cap_ioc_get_endpoint_uid {
__u8 uid[8];
} __attribute__ ((__packed__));
struct cap_ioc_get_ims_certificate {
__u32 certificate_class;
__u32 certificate_id;
__u8 result_code;
__u32 cert_size;
__u8 certificate[CAP_CERTIFICATE_MAX_SIZE];
} __attribute__ ((__packed__));
struct cap_ioc_authenticate {
__u32 auth_type;
__u8 uid[8];
__u8 challenge[32];
__u8 result_code;
__u8 response[64];
__u32 signature_size;
__u8 signature[CAP_SIGNATURE_MAX_SIZE];
} __attribute__ ((__packed__));
#define CAP_IOCTL_BASE 'C'
#define CAP_IOC_GET_ENDPOINT_UID _IOR(CAP_IOCTL_BASE, 0, struct cap_ioc_get_endpoint_uid)
#define CAP_IOC_GET_IMS_CERTIFICATE _IOWR(CAP_IOCTL_BASE, 1, struct cap_ioc_get_ims_certificate)
#define CAP_IOC_AUTHENTICATE _IOWR(CAP_IOCTL_BASE, 2, struct cap_ioc_authenticate)
1. CAP_IOC_GET_ENDPOINT_UID:
This ioctl shall be used by the user to get the endpoint UID associated with
the Interface. All the fields of the 'struct cap_ioc_get_endpoint_uid' are
filled by the kernel.
2. CAP_IOC_GET_IMS_CERTIFICATE:
This ioctl shall be used by the user to retrieve one of the available
cryptographic certificates held by the Interface for use in Component
Authentication. The user is required to fill the 'certificate_class' and
'certificate_id' field of the 'struct cap_ioc_get_ims_certificate' in this
case. The other fields will be set by the kernel in response. The first
'cert_size' bytes of the 'certificate' shall be read by the user and others
must be discarded.
3. CAP_IOC_AUTHENTICATE:
This ioctl shall be used by the user to authenticate the Module attached to
an Interface. The user needs to fill the 'auth_type', 'uid', and 'challenge'
fields of the 'struct cap_ioc_authenticate'. The other fields will be set by
the kernel in response. The first 'signature_size' bytes of the 'signature'
shall be read by the user and others must be discarded.
Sysfs Interfaces - Firmware Download
------------------------------------
The Firmware Download Protocol uses the existing Linux Kernel's Firmware class
and the interface provided to userspace are described in:
Documentation/firmware_class/.
Sysfs Interfaces - SPI Flash
----------------------------
The SPI flash is exposed in userspace as a MTD device and is created
within the Bundle directory. For example, this is how the path may look like:
$ ls /sys/bus/greybus/devices/1-1/1-1.1/1-1.1.1/spi_master/spi32766/spi32766.0/mtd
mtd0 mtd0ro
Sample Applications
-------------------
The current directory also provides a firmware.c test application, which can be
referenced while developing userspace application to talk to firmware-management
protocol.
The current directory also provides an authenticate.c test application, which can
be referenced while developing userspace application to talk to
component authentication protocol.

View File

@ -0,0 +1,262 @@
/*
* Sample code to test firmware-management protocol
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2016 Google Inc. All rights reserved.
* Copyright(c) 2016 Linaro Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details.
*
* BSD LICENSE
*
* Copyright(c) 2016 Google Inc. All rights reserved.
* Copyright(c) 2016 Linaro Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. or Linaro Ltd. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
* LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "../../greybus_firmware.h"
#define FW_DEV_DEFAULT		"/dev/gb-fw-mgmt-0"
#define FW_TAG_INT_DEFAULT	"s3f"
#define FW_TAG_BCND_DEFAULT	"bf_01"
#define FW_UPDATE_TYPE_DEFAULT	0
/* No trailing ';' in an object-like macro: the stray semicolon injected
 * an empty statement (or a syntax error) at every expansion site. */
#define FW_TIMEOUT_DEFAULT	10000

/* Runtime configuration, overridable from argv[] in main(). */
static const char *firmware_tag;
static const char *fwdev = FW_DEV_DEFAULT;
static int fw_update_type = FW_UPDATE_TYPE_DEFAULT;
static int fw_timeout = FW_TIMEOUT_DEFAULT;

/* ioctl argument buffers shared by the update helpers below. */
static struct fw_mgmt_ioc_get_intf_version intf_fw_info;
static struct fw_mgmt_ioc_get_backend_version backend_fw_info;
static struct fw_mgmt_ioc_intf_load_and_validate intf_load;
static struct fw_mgmt_ioc_backend_fw_update backend_update;
/* Print the command-line synopsis for this firmware-update test tool. */
static void usage(void)
{
	printf("\nUsage: ./firmware <gb-fw-mgmt-X (default: gb-fw-mgmt-0)> <interface: 0, backend: 1 (default: 0)> <firmware-tag> (default: \"s3f\"/\"bf_01\") <timeout (default: 10000 ms)>\n");
}
/*
 * Query the running Interface Firmware version, load the image named by
 * the global 'firmware_tag' over UniPro, validate it, and initiate a
 * mode-switch into it.
 *
 * fd: open file descriptor of the gb-fw-mgmt char device.
 * Returns 0 on success, -1 (or the failing ioctl's return) on error.
 */
static int update_intf_firmware(int fd)
{
	int ret;

	/* Get Interface Firmware Version */
	printf("Get Interface Firmware Version\n");

	ret = ioctl(fd, FW_MGMT_IOC_GET_INTF_FW, &intf_fw_info);
	if (ret < 0) {
		printf("Failed to get interface firmware version: %s (%d)\n",
			fwdev, ret);
		return -1;
	}

	printf("Interface Firmware tag (%s), major (%d), minor (%d)\n",
		intf_fw_info.firmware_tag, intf_fw_info.major,
		intf_fw_info.minor);

	/* Try Interface Firmware load over Unipro */
	printf("Loading Interface Firmware\n");
	intf_load.load_method = GB_FW_U_LOAD_METHOD_UNIPRO;
	intf_load.status = 0;
	intf_load.major = 0;
	intf_load.minor = 0;

	/*
	 * snprintf() guarantees NUL-termination; strncpy() with the full
	 * buffer size could leave firmware_tag unterminated when the
	 * user-supplied tag is too long, and the tag is later printed
	 * with %s.
	 */
	snprintf((char *)intf_load.firmware_tag, GB_FIRMWARE_U_TAG_MAX_SIZE,
		 "%s", firmware_tag);

	ret = ioctl(fd, FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE, &intf_load);
	if (ret < 0) {
		printf("Failed to load interface firmware: %s (%d)\n", fwdev,
			ret);
		return -1;
	}

	if (intf_load.status != GB_FW_U_LOAD_STATUS_VALIDATED &&
	    intf_load.status != GB_FW_U_LOAD_STATUS_UNVALIDATED) {
		printf("Load status says loading failed: %d\n",
			intf_load.status);
		return -1;
	}

	printf("Interface Firmware (%s) Load done: major: %d, minor: %d, status: %d\n",
		firmware_tag, intf_load.major, intf_load.minor,
		intf_load.status);

	/* Initiate Mode-switch to the newly loaded firmware */
	printf("Initiate Mode switch\n");

	ret = ioctl(fd, FW_MGMT_IOC_MODE_SWITCH);
	if (ret < 0)
		printf("Failed to initiate mode-switch (%d)\n", ret);

	return ret;
}
/*
 * Query the version of the Backend Interface Firmware named by the
 * global 'firmware_tag' and request the Interface to update it.
 *
 * fd: open file descriptor of the gb-fw-mgmt char device.
 * Returns 0 on success (including a reported load failure, which is
 * only printed), -1 on ioctl error or bad version status.
 */
static int update_backend_firmware(int fd)
{
	int ret;

	/* Get Backend Firmware Version */
	printf("Getting Backend Firmware Version\n");

	/* snprintf() guarantees NUL-termination, unlike strncpy() with the
	 * full buffer size (the tag is later printed with %s). */
	snprintf((char *)backend_fw_info.firmware_tag,
		 GB_FIRMWARE_U_TAG_MAX_SIZE, "%s", firmware_tag);

retry_fw_version:
	ret = ioctl(fd, FW_MGMT_IOC_GET_BACKEND_FW, &backend_fw_info);
	if (ret < 0) {
		printf("Failed to get backend firmware version: %s (%d)\n",
			fwdev, ret);
		return -1;
	}

	printf("Backend Firmware tag (%s), major (%d), minor (%d), status (%d)\n",
		backend_fw_info.firmware_tag, backend_fw_info.major,
		backend_fw_info.minor, backend_fw_info.status);

	/* NOTE(review): both retry loops below are unbounded; presumably the
	 * firmware eventually stops reporting RETRY — confirm, or add a cap. */
	if (backend_fw_info.status == GB_FW_U_BACKEND_VERSION_STATUS_RETRY)
		goto retry_fw_version;

	if ((backend_fw_info.status != GB_FW_U_BACKEND_VERSION_STATUS_SUCCESS)
	    && (backend_fw_info.status != GB_FW_U_BACKEND_VERSION_STATUS_NOT_AVAILABLE)) {
		printf("Failed to get backend firmware version: %s (%d)\n",
			fwdev, backend_fw_info.status);
		return -1;
	}

	/* Try Backend Firmware Update over Unipro */
	printf("Updating Backend Firmware\n");
	snprintf((char *)backend_update.firmware_tag,
		 GB_FIRMWARE_U_TAG_MAX_SIZE, "%s", firmware_tag);

retry_fw_update:
	backend_update.status = 0;

	ret = ioctl(fd, FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE, &backend_update);
	if (ret < 0) {
		printf("Failed to load backend firmware: %s (%d)\n", fwdev, ret);
		return -1;
	}

	if (backend_update.status == GB_FW_U_BACKEND_FW_STATUS_RETRY) {
		printf("Retrying firmware update: %d\n", backend_update.status);
		goto retry_fw_update;
	}

	if (backend_update.status != GB_FW_U_BACKEND_FW_STATUS_SUCCESS) {
		printf("Load status says loading failed: %d\n",
			backend_update.status);
	} else {
		printf("Backend Firmware (%s) Load done: status: %d\n",
			firmware_tag, backend_update.status);
	}

	return 0;
}
/*
 * Parse the command line (device path, update type, firmware tag,
 * timeout), set the load timeout on the gb-fw-mgmt device, then run
 * either an interface or a backend firmware update.
 *
 * Returns 0 on success, -1 on failure or bad usage.
 */
int main(int argc, char *argv[])
{
	int fd, ret;

	if (argc > 1 &&
	    (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help"))) {
		usage();
		return -1;
	}

	if (argc > 1)
		fwdev = argv[1];

	/*
	 * %d matches the int arguments ("%u" with an int * is a
	 * format/argument mismatch), and a failed conversion is now
	 * rejected instead of silently keeping the default.
	 */
	if (argc > 2 && sscanf(argv[2], "%d", &fw_update_type) != 1) {
		usage();
		return -1;
	}

	if (argc > 3) {
		firmware_tag = argv[3];
	} else if (!fw_update_type) {
		firmware_tag = FW_TAG_INT_DEFAULT;
	} else {
		firmware_tag = FW_TAG_BCND_DEFAULT;
	}

	if (argc > 4 && sscanf(argv[4], "%d", &fw_timeout) != 1) {
		usage();
		return -1;
	}

	printf("Trying Firmware update: fwdev: %s, type: %s, tag: %s, timeout: %d\n",
		fwdev, fw_update_type == 0 ? "interface" : "backend",
		firmware_tag, fw_timeout);

	printf("Opening %s firmware management device\n", fwdev);

	fd = open(fwdev, O_RDWR);
	if (fd < 0) {
		printf("Failed to open: %s\n", fwdev);
		return -1;
	}

	/* Set Timeout */
	printf("Setting timeout to %u ms\n", fw_timeout);

	ret = ioctl(fd, FW_MGMT_IOC_SET_TIMEOUT_MS, &fw_timeout);
	if (ret < 0) {
		printf("Failed to set timeout: %s (%d)\n", fwdev, ret);
		ret = -1;
		goto close_fd;
	}

	if (!fw_update_type)
		ret = update_intf_firmware(fd);
	else
		ret = update_backend_firmware(fd);

close_fd:
	close(fd);

	return ret;
}

View File

@ -0,0 +1,275 @@
What: /sys/bus/greybus/devices/greybusN
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The "root" greybus device for the Greybus device tree, or bus,
where N is a dynamically assigned 1-based id.
What: /sys/bus/greybus/devices/greybusN/bus_id
Date: April 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The ID of the "root" greybus device, or bus.
What: /sys/bus/greybus/devices/N-M
Date: March 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
A Module M on the bus N, where M is the 1-byte interface
ID of the module's primary interface.
What: /sys/bus/greybus/devices/N-M/eject
Date: March 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Writing a non-zero argument to this attribute disables the
module's interfaces before physically ejecting it.
What: /sys/bus/greybus/devices/N-M/module_id
Date: March 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The ID of a Greybus module, corresponding to the ID of its
primary interface.
What: /sys/bus/greybus/devices/N-M/num_interfaces
Date: March 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The number of interfaces of a module.
What: /sys/bus/greybus/devices/N-M.I
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
An Interface I on the bus N and module N-M, where I is the
1-byte interface ID.
What: /sys/bus/greybus/devices/N-M.I/current_now
Date: March 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Current measurement of the interface in microamps (uA)
What: /sys/bus/greybus/devices/N-M.I/ddbl1_manufacturer_id
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Unipro Device Descriptor Block Level 1 manufacturer ID for the
greybus Interface.
What: /sys/bus/greybus/devices/N-M.I/ddbl1_product_id
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Unipro Device Descriptor Block Level 1 product ID for the
greybus Interface.
What: /sys/bus/greybus/devices/N-M.I/interface_id
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The ID of a Greybus interface.
What: /sys/bus/greybus/devices/N-M.I/interface_type
Date: June 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The type of a Greybus interface; "dummy", "unipro", "greybus",
or "unknown".
What: /sys/bus/greybus/devices/N-M.I/power_now
Date: March 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Power measurement of the interface in microwatts (uW)
What: /sys/bus/greybus/devices/N-M.I/power_state
Date: March 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
This file reflects the power state of a Greybus interface. If
the value read from it is "on", then power is currently
supplied to the interface. Otherwise it will read "off" and
power is currently not supplied to the interface.
If the value read is "off", then writing "on" (or '1', 'y',
'Y') to this file will enable power to the interface and an
attempt to boot and possibly enumerate it will be made. Note
that on errors, the interface will again be powered down.
If the value read is "on", then writing "off" (or '0', 'n',
'N') to this file will power down the interface.
What: /sys/bus/greybus/devices/N-M.I/product_id
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Product ID of a Greybus interface.
What: /sys/bus/greybus/devices/N-M.I/serial_number
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Serial Number of the Greybus interface, represented by a 64 bit
hexadecimal number.
What: /sys/bus/greybus/devices/N-M.I/vendor_id
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Vendor ID of a Greybus interface.
What: /sys/bus/greybus/devices/N-M.I/voltage_now
Date: March 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Voltage measurement of the interface in microvolts (uV)
What: /sys/bus/greybus/devices/N-M.I.ctrl
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Abstract control device for interface I that represents the
current mode of an enumerated Greybus interface.
What: /sys/bus/greybus/devices/N-M.I.ctrl/product_string
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Product ID string of a Greybus interface.
What: /sys/bus/greybus/devices/N-M.I.ctrl/vendor_string
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Vendor ID string of a Greybus interface.
What: /sys/bus/greybus/devices/N-M.I.B
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
A bundle B on the Interface I, B is replaced by a 1-byte
number representing the bundle.
What: /sys/bus/greybus/devices/N-M.I.B/bundle_class
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The greybus class of the bundle B.
What: /sys/bus/greybus/devices/N-M.I.B/bundle_id
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The interface-unique id of the bundle B.
What: /sys/bus/greybus/devices/N-M.I.B/gpbX
Date: April 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The General Purpose Bridged PHY device of the bundle B,
where X is a dynamically assigned 0-based id.
What: /sys/bus/greybus/devices/N-M.I.B/state
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
A bundle has a state that is managed by the userspace
Endo process. This file allows that Endo to signal
other Android HALs that the state of the bundle has
changed to a specific value. When written to, any
process watching the file will be woken up, and the new
value can be read. It's a "poor-man's IPC", yes, but
simplifies the Android userspace code immensely.
What: /sys/bus/greybus/devices/N-svc
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The singleton SVC device of bus N.
What: /sys/bus/greybus/devices/N-svc/ap_intf_id
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The AP interface ID, a 1-byte non-zero integer which
defines the position of the AP module on the frame.
The interface positions are defined in the GMP
Module Developer Kit.
What: /sys/bus/greybus/devices/N-svc/endo_id
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The Endo ID, which is a 2-byte hexadecimal value
defined by the Endo layout scheme, documented in
the GMP Module Developer Kit.
What: /sys/bus/greybus/devices/N-svc/intf_eject
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
Write the number of the interface that you wish to
forcibly eject from the system.
What: /sys/bus/greybus/devices/N-svc/version
Date: October 2015
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
The version number of the firmware in the SVC device.
What: /sys/bus/greybus/devices/N-svc/watchdog
Date: October 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
If the SVC watchdog is enabled or not. Writing 0 to this
file will disable the watchdog, writing 1 will enable it.
What: /sys/bus/greybus/devices/N-svc/watchdog_action
Date: July 2016
KernelVersion: 4.XX
Contact: Greg Kroah-Hartman <greg@kroah.com>
Description:
This attribute indicates the action to be performed upon SVC
watchdog bite.
The action can be one of the "reset" or "panic". Writing either
one of the "reset" or "panic" will change the behavior of SVC
watchdog bite. Default value is "reset".
"reset" means the UniPro subsystem is to be reset.
"panic" means SVC watchdog bite will cause kernel to panic.

View File

@ -0,0 +1 @@
1

View File

@ -0,0 +1 @@
2

View File

@ -0,0 +1,339 @@
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc., <http://fsf.org/>
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
{description}
Copyright (C) {year} {fullname}
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
{signature of Ty Coon}, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.

View File

@ -0,0 +1,147 @@
greybus-y := core.o \
	debugfs.o \
	hd.o \
	manifest.o \
	module.o \
	interface.o \
	bundle.o \
	connection.o \
	control.o \
	svc.o \
	svc_watchdog.o \
	operation.o \
	timesync.o \
	timesync_platform.o

gb-gbphy-y := gbphy.o

# Prefix all modules with gb-
gb-vibrator-y := vibrator.o
gb-power-supply-y := power_supply.o
gb-log-y := log.o
gb-loopback-y := loopback.o
gb-light-y := light.o
gb-raw-y := raw.o
gb-hid-y := hid.o
gb-es2-y := es2.o
gb-arche-y := arche-platform.o arche-apb-ctrl.o
gb-audio-module-y := audio_module.o audio_topology.o
gb-audio-codec-y := audio_codec.o
gb-audio-gb-y := audio_gb.o
gb-audio-apbridgea-y := audio_apbridgea.o
gb-audio-manager-y += audio_manager.o
gb-audio-manager-y += audio_manager_module.o
gb-bootrom-y := bootrom.o
gb-camera-y := camera.o
gb-firmware-y := fw-core.o fw-download.o fw-management.o authentication.o
gb-spilib-y := spilib.o
gb-sdio-y := sdio.o
gb-uart-y := uart.o
gb-pwm-y := pwm.o
gb-gpio-y := gpio.o
gb-i2c-y := i2c.o
gb-usb-y := usb.o
gb-spi-y := spi.o

obj-m += greybus.o
obj-m += gb-gbphy.o
obj-m += gb-vibrator.o
obj-m += gb-power-supply.o
obj-m += gb-log.o
obj-m += gb-loopback.o
obj-m += gb-light.o
obj-m += gb-hid.o
obj-m += gb-raw.o
obj-m += gb-es2.o
ifeq ($(CONFIG_USB_HSIC_USB3613),y)
obj-m += gb-arche.o
endif
ifeq ($(CONFIG_ARCH_MSM8994),y)
obj-m += gb-audio-codec.o
obj-m += gb-audio-module.o
obj-m += gb-camera.o
endif
obj-m += gb-audio-gb.o
obj-m += gb-audio-apbridgea.o
obj-m += gb-audio-manager.o
obj-m += gb-bootrom.o
obj-m += gb-firmware.o
obj-m += gb-spilib.o
obj-m += gb-sdio.o
obj-m += gb-uart.o
obj-m += gb-pwm.o
obj-m += gb-gpio.o
obj-m += gb-i2c.o
obj-m += gb-usb.o
obj-m += gb-spi.o

KERNELVER ?= $(shell uname -r)
KERNELDIR ?= /lib/modules/$(KERNELVER)/build
INSTALL_MOD_PATH ?= /..
PWD := $(shell pwd)

# Kernel config options that must be enabled
CONFIG_OPTIONS_ENABLE := POWER_SUPPLY PWM SYSFS SPI USB SND_SOC MMC LEDS_CLASS INPUT

# Kernel config options that must be disabled
CONFIG_OPTIONS_DISABLE :=

# This part only runs during the kbuild stage of the makefile
ifneq ($(KERNELRELEASE),)

# kvers_cmp returns the argument version if the current kernel version is
# older than the passed version, 1 if they are equal, and the current kernel
# version if it is newer than the argument version.
kvers_cmp=$(shell [ "$(KERNELVERSION)" = "$(1)" ] && echo 1 || printf "$(1)\n$(KERNELVERSION)" | sort -V | tail -1)

ifneq ($(call kvers_cmp,"3.19.0"),3.19.0)
CONFIG_OPTIONS_ENABLE += LEDS_CLASS_FLASH
endif

ifneq ($(call kvers_cmp,"4.2.0"),4.2.0)
CONFIG_OPTIONS_ENABLE += V4L2_FLASH_LED_CLASS
endif

$(foreach opt,$(CONFIG_OPTIONS_ENABLE),$(if $(CONFIG_$(opt)),, \
     $(error CONFIG_$(opt) is disabled in the kernel configuration and must be enable \
     to continue compilation)))
$(foreach opt,$(CONFIG_OPTIONS_DISABLE),$(if $(filter m y, $(CONFIG_$(opt))), \
     $(error CONFIG_$(opt) is enabled in the kernel configuration and must be disable \
     to continue compilation),))
endif

# add -Wall to try to catch everything we can.
ccflags-y := -Wall

# needed for trace events
ccflags-y += -I$(src)

GB_AUDIO_MANAGER_SYSFS ?= true
ifeq ($(GB_AUDIO_MANAGER_SYSFS),true)
gb-audio-manager-y += audio_manager_sysfs.o
ccflags-y += -DGB_AUDIO_MANAGER_SYSFS
endif

all: module

tools::
	$(MAKE) -C tools KERNELDIR=$(realpath $(KERNELDIR))

module:
	$(MAKE) -C $(KERNELDIR) M=$(PWD)

check:
	$(MAKE) -C $(KERNELDIR) M=$(PWD) C=2 CF="-D__CHECK_ENDIAN__"

clean:
	rm -f *.o *~ core .depend .*.cmd *.ko *.mod.c
	rm -f Module.markers Module.symvers modules.order
	rm -rf .tmp_versions Modules.symvers
	$(MAKE) -C tools clean

coccicheck:
	$(MAKE) -C $(KERNELDIR) M=$(PWD) coccicheck

install: module
	mkdir -p $(INSTALL_MOD_PATH)/lib/modules/$(KERNELVER)/kernel/drivers/greybus/
	cp -f *.ko $(INSTALL_MOD_PATH)/lib/modules/$(KERNELVER)/kernel/drivers/greybus/
	depmod -b $(INSTALL_MOD_PATH) -a $(KERNELVER)

View File

@ -0,0 +1,10 @@
Greybus kernel code
To build against the running kernel (odds are you don't want this):
make
To build against a specific kernel source tree (odds are you want this):
KERNELDIR=/home/some/random/place make
For any questions or concerns about this code base, please email:
Greg Kroah-Hartman <greg@kroah.com>

View File

@ -0,0 +1,522 @@
/*
* Arche Platform driver to control APB.
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include "arche_platform.h"
/* Per-device state for one AP Bridge (APB) controlled by the AP. */
struct arche_apb_ctrl_drvdata {
	/* Control GPIO signals to and from AP <=> AP Bridges */
	int resetn_gpio;	/* RESETn: active-low at the pin (see assert_reset()) */
	int boot_ret_gpio;	/* boot retention control */
	int pwroff_gpio;	/* requested as input "pwroff_n" in devtree parsing */
	int wake_in_gpio;	/* not used in this file -- TODO confirm users elsewhere */
	int wake_out_gpio;	/* not used in this file -- TODO confirm users elsewhere */
	int pwrdn_gpio;		/* looked up from DT but never driven here */

	enum arche_platform_state state;	/* current power/boot state of the APB */
	bool init_disabled;	/* set from "arche,init-disable" DT property; blocks sequences */

	/* Optional supplies: may hold ERR_PTR when absent (probe only warns) */
	struct regulator *vcore;
	struct regulator *vio;

	int clk_en_gpio;	/* optional clock enable (not mandatory on DB3) */
	struct clk *clk;	/* not used in this file -- TODO confirm users elsewhere */

	struct pinctrl *pinctrl;
	struct pinctrl_state *pin_default;

	/* V2: SPI Bus control (requested only while in FW-flashing state) */
	int spi_en_gpio;
	bool spi_en_polarity_high;	/* from "spi-en-active-high" DT property */
};
/*
 * Low-level reset helpers. The RESETn line itself is active-low at the
 * pin: asserting reset drives the GPIO low, deasserting drives it high
 * (see also the "Active-low signal" note in coldboot_seq()).
 */
static inline void deassert_reset(unsigned int gpio)
{
	/* Release the APB from reset (RESETn high) */
	gpio_set_value(gpio, 1);
}
static inline void assert_reset(unsigned int gpio)
{
	/* Hold the APB in reset (RESETn low) */
	gpio_set_value(gpio, 0);
}
/*
 * Cold-boot (power-up) sequence for the APB.
 *
 * Note: please do not modify the sequence below, it follows the spec.
 *
 * Returns 0 on success or when nothing needs doing (init disabled, or
 * already active); a negative errno if enabling a regulator fails.
 */
static int coldboot_seq(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
	int ret;

	if (apb->init_disabled ||
			apb->state == ARCHE_PLATFORM_STATE_ACTIVE)
		return 0;

	/* Hold APB in reset state */
	assert_reset(apb->resetn_gpio);

	/* Leaving FW-flashing mode: release the SPI bus enable GPIO */
	if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING &&
			gpio_is_valid(apb->spi_en_gpio))
		devm_gpio_free(dev, apb->spi_en_gpio);

	/* Enable power to APB; both supplies are optional (may be ERR_PTR) */
	if (!IS_ERR(apb->vcore)) {
		ret = regulator_enable(apb->vcore);
		if (ret) {
			dev_err(dev, "failed to enable core regulator\n");
			return ret;
		}
	}

	if (!IS_ERR(apb->vio)) {
		ret = regulator_enable(apb->vio);
		if (ret) {
			dev_err(dev, "failed to enable IO regulator\n");
			return ret;
		}
	}

	/* Make sure boot retention is released before taking APB out of reset */
	apb_bootret_deassert(dev);

	/* On DB3 clock was not mandatory */
	if (gpio_is_valid(apb->clk_en_gpio))
		gpio_set_value(apb->clk_en_gpio, 1);

	usleep_range(100, 200);

	/* deassert reset to APB : Active-low signal */
	deassert_reset(apb->resetn_gpio);

	apb->state = ARCHE_PLATFORM_STATE_ACTIVE;

	return 0;
}
/*
 * Put the APB into firmware-flashing mode: power the (optional) supplies,
 * take ownership of the SPI bus enable GPIO (V2+ platforms) and hold the
 * bridge in reset so an external tool can flash it.
 *
 * Returns 0 on success or when nothing needs doing; a negative errno on
 * regulator or GPIO failure.
 */
static int fw_flashing_seq(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
	int ret;

	if (apb->init_disabled ||
			apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
		return 0;

	/*
	 * The regulators are optional -- probe only warns when they are
	 * missing and leaves an ERR_PTR behind -- so guard with IS_ERR()
	 * here exactly like coldboot_seq()/poweroff_seq() do. Calling
	 * regulator_enable() on an ERR_PTR would dereference a bad pointer.
	 */
	if (!IS_ERR(apb->vcore)) {
		ret = regulator_enable(apb->vcore);
		if (ret) {
			dev_err(dev, "failed to enable core regulator\n");
			return ret;
		}
	}

	if (!IS_ERR(apb->vio)) {
		ret = regulator_enable(apb->vio);
		if (ret) {
			dev_err(dev, "failed to enable IO regulator\n");
			return ret;
		}
	}

	if (gpio_is_valid(apb->spi_en_gpio)) {
		unsigned long flags;

		/* Drive the SPI enable line to its active level */
		if (apb->spi_en_polarity_high)
			flags = GPIOF_OUT_INIT_HIGH;
		else
			flags = GPIOF_OUT_INIT_LOW;

		ret = devm_gpio_request_one(dev, apb->spi_en_gpio,
					    flags, "apb_spi_en");
		if (ret) {
			dev_err(dev, "Failed requesting SPI bus en gpio %d\n",
				apb->spi_en_gpio);
			return ret;
		}
	}

	/* for flashing device should be in reset state */
	assert_reset(apb->resetn_gpio);
	apb->state = ARCHE_PLATFORM_STATE_FW_FLASHING;

	return 0;
}
/*
 * Transition the APB to standby. Per the WDM spec no output signals
 * change on entering standby; only the SPI bus enable GPIO is released
 * when coming out of FW-flashing mode. Always returns 0.
 */
static int standby_boot_seq(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);

	if (apb->init_disabled)
		return 0;

	/* Even if it is in OFF state, we do not want to change the state */
	if (apb->state == ARCHE_PLATFORM_STATE_STANDBY ||
			apb->state == ARCHE_PLATFORM_STATE_OFF)
		return 0;

	if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING &&
			gpio_is_valid(apb->spi_en_gpio))
		devm_gpio_free(dev, apb->spi_en_gpio);

	/*
	 * As per WDM spec, do nothing.
	 *
	 * Pasted from WDM spec:
	 *  - A falling edge on POWEROFF_L is detected (a)
	 *  - WDM enters standby mode, but no output signals are changed
	 */

	/* TODO: POWEROFF_L is input to WDM module */

	apb->state = ARCHE_PLATFORM_STATE_STANDBY;

	return 0;
}
/*
 * Power down the APB: release FW-flashing resources, gate the clock,
 * drop the (optional) supplies and leave the bridge held in reset.
 * No-op when init is disabled or the bridge is already off.
 */
static void poweroff_seq(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);

	if (apb->init_disabled || apb->state == ARCHE_PLATFORM_STATE_OFF)
		return;

	/* Leaving FW-flashing mode: release the SPI bus enable GPIO */
	if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING &&
			gpio_is_valid(apb->spi_en_gpio))
		devm_gpio_free(dev, apb->spi_en_gpio);

	/* disable the clock */
	if (gpio_is_valid(apb->clk_en_gpio))
		gpio_set_value(apb->clk_en_gpio, 0);

	/* Only disable supplies that exist and are currently enabled */
	if (!IS_ERR(apb->vcore) && regulator_is_enabled(apb->vcore) > 0)
		regulator_disable(apb->vcore);

	if (!IS_ERR(apb->vio) && regulator_is_enabled(apb->vio) > 0)
		regulator_disable(apb->vio);

	/* As part of exit, put APB back in reset state */
	assert_reset(apb->resetn_gpio);
	apb->state = ARCHE_PLATFORM_STATE_OFF;

	/* TODO: May have to send an event to SVC about this exit */
}
/* Drive the boot-retention line high for the APB attached to @dev. */
void apb_bootret_assert(struct device *dev)
{
	struct arche_apb_ctrl_drvdata *apb = dev_get_drvdata(dev);

	gpio_set_value(apb->boot_ret_gpio, 1);
}
/* Drive the boot-retention line low for the APB attached to @dev. */
void apb_bootret_deassert(struct device *dev)
{
	struct arche_apb_ctrl_drvdata *apb = dev_get_drvdata(dev);

	gpio_set_value(apb->boot_ret_gpio, 0);
}
/* Public wrapper: run the cold-boot sequence for this device. */
int apb_ctrl_coldboot(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	return coldboot_seq(pdev);
}
/* Public wrapper: enter firmware-flashing mode for this device. */
int apb_ctrl_fw_flashing(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	return fw_flashing_seq(pdev);
}
/* Public wrapper: run the standby sequence for this device. */
int apb_ctrl_standby_boot(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	return standby_boot_seq(pdev);
}
/* Public wrapper: run the power-off sequence for this device. */
void apb_ctrl_poweroff(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	poweroff_seq(pdev);
}
/*
 * "state" sysfs store: switch the APB between off/active/standby/
 * fw_flashing on userspace request.
 *
 * Returns @count on success (including writes matching the current
 * state), -EINVAL for unknown strings, or the sequence's error code.
 */
static ssize_t state_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
	int ret = 0;
	bool is_disabled;

	if (sysfs_streq(buf, "off")) {
		if (apb->state == ARCHE_PLATFORM_STATE_OFF)
			return count;

		poweroff_seq(pdev);
	} else if (sysfs_streq(buf, "active")) {
		if (apb->state == ARCHE_PLATFORM_STATE_ACTIVE)
			return count;

		/* Full power cycle: off first, then cold boot */
		poweroff_seq(pdev);
		/*
		 * An explicit "active" request clears init_disabled so
		 * coldboot_seq() will run; the flag is restored only if
		 * the boot fails.
		 */
		is_disabled = apb->init_disabled;
		apb->init_disabled = false;
		ret = coldboot_seq(pdev);
		if (ret)
			apb->init_disabled = is_disabled;
	} else if (sysfs_streq(buf, "standby")) {
		if (apb->state == ARCHE_PLATFORM_STATE_STANDBY)
			return count;

		ret = standby_boot_seq(pdev);
	} else if (sysfs_streq(buf, "fw_flashing")) {
		if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
			return count;

		/*
		 * First we want to make sure we power off everything
		 * and then enter FW flashing state
		 */
		poweroff_seq(pdev);
		ret = fw_flashing_seq(pdev);
	} else {
		dev_err(dev, "unknown state\n");
		ret = -EINVAL;
	}

	return ret ? ret : count;
}
/*
 * "state" sysfs show: report the current APB state as a string. The
 * "off" state additionally carries a ",disabled" suffix when init is
 * disabled via devicetree.
 */
static ssize_t state_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct arche_apb_ctrl_drvdata *apb = dev_get_drvdata(dev);

	if (apb->state == ARCHE_PLATFORM_STATE_OFF)
		return sprintf(buf, "off%s\n",
			apb->init_disabled ? ",disabled" : "");

	if (apb->state == ARCHE_PLATFORM_STATE_ACTIVE)
		return sprintf(buf, "active\n");

	if (apb->state == ARCHE_PLATFORM_STATE_STANDBY)
		return sprintf(buf, "standby\n");

	if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
		return sprintf(buf, "fw_flashing\n");

	return sprintf(buf, "unknown state\n");
}

static DEVICE_ATTR_RW(state);
/*
 * Parse the devicetree node for @pdev: request the control GPIOs and
 * look up the (optional) regulators and the pinctrl default state.
 *
 * Returns 0 on success or a negative errno; all resources are
 * devm-managed, so nothing needs explicit cleanup on failure.
 */
static int apb_ctrl_get_devtree_data(struct platform_device *pdev,
		struct arche_apb_ctrl_drvdata *apb)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	int ret;

	apb->resetn_gpio = of_get_named_gpio(np, "reset-gpios", 0);
	if (apb->resetn_gpio < 0) {
		dev_err(dev, "failed to get reset gpio\n");
		return apb->resetn_gpio;
	}
	/* Start with reset asserted (RESETn low) until a boot sequence runs */
	ret = devm_gpio_request_one(dev, apb->resetn_gpio,
		GPIOF_OUT_INIT_LOW, "apb-reset");
	if (ret) {
		dev_err(dev, "Failed requesting reset gpio %d\n",
			apb->resetn_gpio);
		return ret;
	}

	apb->boot_ret_gpio = of_get_named_gpio(np, "boot-ret-gpios", 0);
	if (apb->boot_ret_gpio < 0) {
		dev_err(dev, "failed to get boot retention gpio\n");
		return apb->boot_ret_gpio;
	}
	ret = devm_gpio_request_one(dev, apb->boot_ret_gpio,
		GPIOF_OUT_INIT_LOW, "boot retention");
	if (ret) {
		dev_err(dev, "Failed requesting bootret gpio %d\n",
			apb->boot_ret_gpio);
		return ret;
	}

	/* It's not mandatory to support power management interface */
	apb->pwroff_gpio = of_get_named_gpio(np, "pwr-off-gpios", 0);
	if (apb->pwroff_gpio < 0) {
		dev_err(dev, "failed to get power off gpio\n");
		return apb->pwroff_gpio;
	}
	ret = devm_gpio_request_one(dev, apb->pwroff_gpio,
		GPIOF_IN, "pwroff_n");
	if (ret) {
		dev_err(dev, "Failed requesting pwroff_n gpio %d\n",
			apb->pwroff_gpio);
		return ret;
	}

	/* Do not make clock mandatory as of now (for DB3) */
	apb->clk_en_gpio = of_get_named_gpio(np, "clock-en-gpio", 0);
	if (apb->clk_en_gpio < 0) {
		dev_warn(dev, "failed to get clock en gpio\n");
	} else if (gpio_is_valid(apb->clk_en_gpio)) {
		ret = devm_gpio_request_one(dev, apb->clk_en_gpio,
			GPIOF_OUT_INIT_LOW, "apb_clk_en");
		if (ret) {
			dev_warn(dev, "Failed requesting APB clock en gpio %d\n",
				apb->clk_en_gpio);
			return ret;
		}
	}

	/* Only looked up here; this GPIO is never requested or driven */
	apb->pwrdn_gpio = of_get_named_gpio(np, "pwr-down-gpios", 0);
	if (apb->pwrdn_gpio < 0)
		dev_warn(dev, "failed to get power down gpio\n");

	/* Regulators are optional, as we may have fixed supply coming in */
	apb->vcore = devm_regulator_get(dev, "vcore");
	if (IS_ERR(apb->vcore))
		dev_warn(dev, "no core regulator found\n");

	apb->vio = devm_regulator_get(dev, "vio");
	if (IS_ERR(apb->vio))
		dev_warn(dev, "no IO regulator found\n");

	apb->pinctrl = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR(apb->pinctrl)) {
		dev_err(&pdev->dev, "could not get pinctrl handle\n");
		return PTR_ERR(apb->pinctrl);
	}
	apb->pin_default = pinctrl_lookup_state(apb->pinctrl, "default");
	if (IS_ERR(apb->pin_default)) {
		dev_err(&pdev->dev, "could not get default pin state\n");
		return PTR_ERR(apb->pin_default);
	}

	/* Only applicable for platform >= V2 */
	apb->spi_en_gpio = of_get_named_gpio(np, "spi-en-gpio", 0);
	if (apb->spi_en_gpio >= 0) {
		if (of_property_read_bool(pdev->dev.of_node,
			"spi-en-active-high"))
			apb->spi_en_polarity_high = true;
	}

	return 0;
}
/*
 * Probe the APB control device: parse DT resources, record the initial
 * (OFF) state and expose the "state" sysfs attribute for userspace control.
 */
static int arche_apb_ctrl_probe(struct platform_device *pdev)
{
	int ret;
	struct arche_apb_ctrl_drvdata *apb;
	struct device *dev = &pdev->dev;

	/* devm allocation: freed automatically when the driver detaches */
	apb = devm_kzalloc(&pdev->dev, sizeof(*apb), GFP_KERNEL);
	if (!apb)
		return -ENOMEM;

	/* GPIOs, regulators, pinctrl and clock come from the device tree */
	ret = apb_ctrl_get_devtree_data(pdev, apb);
	if (ret) {
		dev_err(dev, "failed to get apb devicetree data %d\n", ret);
		return ret;
	}

	/* Initially set APB to OFF state */
	apb->state = ARCHE_PLATFORM_STATE_OFF;
	/* Check whether device needs to be enabled on boot */
	if (of_property_read_bool(pdev->dev.of_node, "arche,init-disable"))
		apb->init_disabled = true;

	platform_set_drvdata(pdev, apb);

	/* Create sysfs interface to allow user to change state dynamically */
	ret = device_create_file(dev, &dev_attr_state);
	if (ret) {
		dev_err(dev, "failed to create state file in sysfs\n");
		return ret;
	}

	dev_info(&pdev->dev, "Device registered successfully\n");
	return 0;
}
/* Tear down sysfs, power the bridge off and clear the driver data. */
static int arche_apb_ctrl_remove(struct platform_device *pdev)
{
	device_remove_file(&pdev->dev, &dev_attr_state);
	/* Leave the APB powered off when the driver goes away */
	poweroff_seq(pdev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
/* System-suspend hook: currently a no-op placeholder (see TODOs below). */
static int arche_apb_ctrl_suspend(struct device *dev)
{
	/*
	 * If timing profile permits, we may shutdown bridge
	 * completely
	 *
	 * TODO: sequence ??
	 *
	 * Also, need to make sure we meet precondition for unipro suspend
	 * Precondition: Definition ???
	 */
	return 0;
}
/* System-resume hook: currently a no-op placeholder (see notes below). */
static int arche_apb_ctrl_resume(struct device *dev)
{
	/*
	 * At least for ES2 we have to meet the delay requirement between
	 * unipro switch and AP bridge init, depending on whether bridge is in
	 * OFF state or standby state.
	 *
	 * Based on whether bridge is in standby or OFF state we may have to
	 * assert multiple signals. Please refer to WDM spec, for more info.
	 *
	 */
	return 0;
}
/* Power the bridge off on system shutdown/reboot. */
static void arche_apb_ctrl_shutdown(struct platform_device *pdev)
{
	apb_ctrl_poweroff(&pdev->dev);
}
static SIMPLE_DEV_PM_OPS(arche_apb_ctrl_pm_ops, arche_apb_ctrl_suspend,
arche_apb_ctrl_resume);
/*
 * DT match table. of_device_id tables are read-only and
 * .of_match_table takes a const pointer, so declare it const.
 */
static const struct of_device_id arche_apb_ctrl_of_match[] = {
	{ .compatible = "usbffff,2", },
	{ },
};
/* Platform driver glue for the APB control device (registered by arche_apb_init()). */
static struct platform_driver arche_apb_ctrl_device_driver = {
	.probe		= arche_apb_ctrl_probe,
	.remove		= arche_apb_ctrl_remove,
	.shutdown	= arche_apb_ctrl_shutdown,
	.driver		= {
		.name	= "arche-apb-ctrl",
		.pm	= &arche_apb_ctrl_pm_ops,
		.of_match_table = arche_apb_ctrl_of_match,
	}
};
/* Register the APB control driver; called from the arche-platform init path. */
int __init arche_apb_init(void)
{
	return platform_driver_register(&arche_apb_ctrl_device_driver);
}
/* Unregister the APB control driver; called from the arche-platform exit path. */
void __exit arche_apb_exit(void)
{
	platform_driver_unregister(&arche_apb_ctrl_device_driver);
}

View File

@ -0,0 +1,828 @@
/*
* Arche Platform driver to enable Unipro link.
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/suspend.h>
#include <linux/time.h>
#include "arche_platform.h"
#include "greybus.h"
#include <linux/usb/usb3613.h>
#define WD_COLDBOOT_PULSE_WIDTH_MS 30
/* State machine for the bi-directional SVC wake/detect line. */
enum svc_wakedetect_state {
	WD_STATE_IDLE,			/* Default state = pulled high/low */
	WD_STATE_BOOT_INIT,		/* WD = falling edge (low) */
	WD_STATE_COLDBOOT_TRIG,		/* WD = rising edge (high), > 30msec */
	WD_STATE_STANDBYBOOT_TRIG,	/* As of now not used ?? */
	WD_STATE_COLDBOOT_START,	/* Cold boot process started */
	WD_STATE_STANDBYBOOT_START,	/* Not used */
	WD_STATE_TIMESYNC,		/* Line is owned by timesync; ignore boot events */
};
/* Per-device state for the arche-platform (SVC) driver. */
struct arche_platform_drvdata {
	/* Control GPIO signals to and from AP <=> SVC */
	int svc_reset_gpio;
	bool is_reset_act_hi;		/* polarity of the reset line */
	int svc_sysboot_gpio;		/* selects normal vs FW-flashing boot */
	int wake_detect_gpio; /* bi-dir,maps to WAKE_MOD & WAKE_FRAME signals */

	enum arche_platform_state state;

	int svc_refclk_req;		/* input gpio: SVC requests ref clock */
	struct clk *svc_ref_clk;

	struct pinctrl *pinctrl;
	struct pinctrl_state *pin_default;

	int num_apbs;			/* number of APB child nodes in DT */

	enum svc_wakedetect_state wake_detect_state;
	int wake_detect_irq;
	spinlock_t wake_lock;			/* Protect wake_detect_state */
	struct mutex platform_state_mutex;	/* Protect state */
	wait_queue_head_t wq;			/* WQ for arche_pdata->state */
	unsigned long wake_detect_start;	/* jiffies of last falling edge */
	struct notifier_block pm_notifier;

	struct device *dev;
	struct gb_timesync_svc *timesync_svc_pdata;
};
/* device_for_each_child() callback: assert BOOTRET on one APB child. */
static int arche_apb_bootret_assert(struct device *dev, void *data)
{
	apb_bootret_assert(dev);
	return 0;
}
/* device_for_each_child() callback: deassert BOOTRET on one APB child. */
static int arche_apb_bootret_deassert(struct device *dev, void *data)
{
	apb_bootret_deassert(dev);
	return 0;
}
/* Requires calling context to hold arche_pdata->platform_state_mutex */
static void arche_platform_set_state(struct arche_platform_drvdata *arche_pdata,
				     enum arche_platform_state state)
{
	arche_pdata->state = state;
}
/*
* arche_platform_change_state: Change the operational state
*
* This exported function allows external drivers to change the state
* of the arche-platform driver.
* Note that this function only supports transitions between two states
* with limited functionality.
*
* - ARCHE_PLATFORM_STATE_TIME_SYNC:
* Once set, allows timesync operations between SVC <=> AP and makes
* sure that arche-platform driver ignores any subsequent events/pulses
* from SVC over wake/detect.
*
* - ARCHE_PLATFORM_STATE_ACTIVE:
* Puts back driver to active state, where any pulse from SVC on wake/detect
* line would trigger either cold/standby boot.
* Note: Transition request from this function does not trigger cold/standby
* boot. It just puts back driver book keeping variable back to ACTIVE
* state and restores the interrupt.
*
* Returns -ENODEV if device not found, -EAGAIN if the driver cannot currently
* satisfy the requested state-transition or -EINVAL for all other
* state-transition requests.
*/
int arche_platform_change_state(enum arche_platform_state state,
struct gb_timesync_svc *timesync_svc_pdata)
{
struct arche_platform_drvdata *arche_pdata;
struct platform_device *pdev;
struct device_node *np;
int ret = -EAGAIN;
unsigned long flags;
np = of_find_compatible_node(NULL, NULL, "google,arche-platform");
if (!np) {
pr_err("google,arche-platform device node not found\n");
return -ENODEV;
}
pdev = of_find_device_by_node(np);
if (!pdev) {
pr_err("arche-platform device not found\n");
return -ENODEV;
}
arche_pdata = platform_get_drvdata(pdev);
mutex_lock(&arche_pdata->platform_state_mutex);
spin_lock_irqsave(&arche_pdata->wake_lock, flags);
if (arche_pdata->state == state) {
ret = 0;
goto exit;
}
switch (state) {
case ARCHE_PLATFORM_STATE_TIME_SYNC:
if (arche_pdata->state != ARCHE_PLATFORM_STATE_ACTIVE) {
ret = -EINVAL;
goto exit;
}
if (arche_pdata->wake_detect_state != WD_STATE_IDLE) {
dev_err(arche_pdata->dev,
"driver busy with wake/detect line ops\n");
goto exit;
}
device_for_each_child(arche_pdata->dev, NULL,
arche_apb_bootret_assert);
arche_pdata->wake_detect_state = WD_STATE_TIMESYNC;
break;
case ARCHE_PLATFORM_STATE_ACTIVE:
if (arche_pdata->state != ARCHE_PLATFORM_STATE_TIME_SYNC) {
ret = -EINVAL;
goto exit;
}
device_for_each_child(arche_pdata->dev, NULL,
arche_apb_bootret_deassert);
arche_pdata->wake_detect_state = WD_STATE_IDLE;
break;
case ARCHE_PLATFORM_STATE_OFF:
case ARCHE_PLATFORM_STATE_STANDBY:
case ARCHE_PLATFORM_STATE_FW_FLASHING:
dev_err(arche_pdata->dev, "busy, request to retry later\n");
goto exit;
default:
ret = -EINVAL;
dev_err(arche_pdata->dev,
"invalid state transition request\n");
goto exit;
}
arche_pdata->timesync_svc_pdata = timesync_svc_pdata;
arche_platform_set_state(arche_pdata, state);
if (state == ARCHE_PLATFORM_STATE_ACTIVE)
wake_up(&arche_pdata->wq);
ret = 0;
exit:
spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
mutex_unlock(&arche_pdata->platform_state_mutex);
of_node_put(np);
return ret;
}
EXPORT_SYMBOL_GPL(arche_platform_change_state);
/* Requires arche_pdata->wake_lock is held by calling context */
static void arche_platform_set_wake_detect_state(
				struct arche_platform_drvdata *arche_pdata,
				enum svc_wakedetect_state state)
{
	arche_pdata->wake_detect_state = state;
}
/* Drive the SVC reset line; 'onoff' is the raw gpio level to set. */
static inline void svc_reset_onoff(unsigned int gpio, bool onoff)
{
	gpio_set_value(gpio, onoff);
}
/* device_for_each_child() callback: cold-boot one APB child. */
static int apb_cold_boot(struct device *dev, void *data)
{
	int ret;

	ret = apb_ctrl_coldboot(dev);
	if (ret)
		dev_warn(dev, "failed to coldboot\n");

	/* Child nodes are independent, so do not exit coldboot operation */
	return 0;
}
/* device_for_each_child() callback: power off one APB child. */
static int apb_poweroff(struct device *dev, void *data)
{
	apb_ctrl_poweroff(dev);

	/* Enable HUB3613 into HUB mode. */
	if (usb3613_hub_mode_ctrl(false))
		dev_warn(dev, "failed to control hub device\n");

	return 0;
}
/* Switch the wake/detect gpio to input and (re)enable its interrupt. */
static void arche_platform_wd_irq_en(struct arche_platform_drvdata *arche_pdata)
{
	/* Enable interrupt here, to read event back from SVC */
	gpio_direction_input(arche_pdata->wake_detect_gpio);
	enable_irq(arche_pdata->wake_detect_irq);
}
/*
 * Threaded half of the wake/detect IRQ: runs the actual cold-boot power
 * cycle, which sleeps and so cannot run in hard-irq context. Only entered
 * when the hard-irq handler has moved the state to WD_STATE_COLDBOOT_TRIG.
 */
static irqreturn_t arche_platform_wd_irq_thread(int irq, void *devid)
{
	struct arche_platform_drvdata *arche_pdata = devid;
	unsigned long flags;

	spin_lock_irqsave(&arche_pdata->wake_lock, flags);
	if (arche_pdata->wake_detect_state != WD_STATE_COLDBOOT_TRIG) {
		/* Something is wrong */
		spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
		return IRQ_HANDLED;
	}

	arche_platform_set_wake_detect_state(arche_pdata,
					     WD_STATE_COLDBOOT_START);
	spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);

	/* It should complete power cycle, so first make sure it is poweroff */
	device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);

	/* Bring APB out of reset: cold boot sequence */
	device_for_each_child(arche_pdata->dev, NULL, apb_cold_boot);

	/* Enable HUB3613 into HUB mode. */
	if (usb3613_hub_mode_ctrl(true))
		dev_warn(arche_pdata->dev, "failed to control hub device\n");

	spin_lock_irqsave(&arche_pdata->wake_lock, flags);
	arche_platform_set_wake_detect_state(arche_pdata, WD_STATE_IDLE);
	spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);

	return IRQ_HANDLED;
}
/*
 * Hard-irq half of the wake/detect handler (fires on both edges).
 * Measures the low-pulse width: a low pulse longer than
 * WD_COLDBOOT_PULSE_WIDTH_MS triggers the threaded cold-boot handler;
 * shorter pulses (standby boot) are ignored. In TIMESYNC state the event
 * is forwarded to the timesync core instead.
 */
static irqreturn_t arche_platform_wd_irq(int irq, void *devid)
{
	struct arche_platform_drvdata *arche_pdata = devid;
	unsigned long flags;

	spin_lock_irqsave(&arche_pdata->wake_lock, flags);

	if (arche_pdata->wake_detect_state == WD_STATE_TIMESYNC) {
		gb_timesync_irq(arche_pdata->timesync_svc_pdata);
		goto exit;
	}

	if (gpio_get_value(arche_pdata->wake_detect_gpio)) {
		/* wake/detect rising */

		/*
		 * If wake/detect line goes high after low, within less than
		 * 30msec, then standby boot sequence is initiated, which is not
		 * supported/implemented as of now. So ignore it.
		 */
		if (arche_pdata->wake_detect_state == WD_STATE_BOOT_INIT) {
			if (time_before(jiffies,
					arche_pdata->wake_detect_start +
					msecs_to_jiffies(WD_COLDBOOT_PULSE_WIDTH_MS))) {
				arche_platform_set_wake_detect_state(arche_pdata,
								     WD_STATE_IDLE);
			} else {
				/* Check we are not in middle of irq thread already */
				if (arche_pdata->wake_detect_state !=
						WD_STATE_COLDBOOT_START) {
					arche_platform_set_wake_detect_state(arche_pdata,
									     WD_STATE_COLDBOOT_TRIG);
					spin_unlock_irqrestore(
						&arche_pdata->wake_lock,
						flags);
					return IRQ_WAKE_THREAD;
				}
			}
		}
	} else {
		/* wake/detect falling */
		if (arche_pdata->wake_detect_state == WD_STATE_IDLE) {
			arche_pdata->wake_detect_start = jiffies;
			/*
			 * In the beginning, when wake/detect goes low (first time), we assume
			 * it is meant for coldboot and set the flag. If wake/detect line stays low
			 * beyond 30msec, then it is coldboot else fallback to standby boot.
			 */
			arche_platform_set_wake_detect_state(arche_pdata,
							     WD_STATE_BOOT_INIT);
		}
	}

exit:
	spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);

	return IRQ_HANDLED;
}
/*
 * Requires arche_pdata->platform_state_mutex to be held
 *
 * Cold-boot sequence: hold SVC in reset, select normal boot (sysboot=0),
 * enable the reference clock, then release reset. Moves driver state to
 * ACTIVE. No-op when already ACTIVE.
 */
static int arche_platform_coldboot_seq(struct arche_platform_drvdata *arche_pdata)
{
	int ret;

	if (arche_pdata->state == ARCHE_PLATFORM_STATE_ACTIVE)
		return 0;

	dev_info(arche_pdata->dev, "Booting from cold boot state\n");

	svc_reset_onoff(arche_pdata->svc_reset_gpio,
			arche_pdata->is_reset_act_hi);

	gpio_set_value(arche_pdata->svc_sysboot_gpio, 0);
	usleep_range(100, 200);

	ret = clk_prepare_enable(arche_pdata->svc_ref_clk);
	if (ret) {
		dev_err(arche_pdata->dev, "failed to enable svc_ref_clk: %d\n",
			ret);
		return ret;
	}

	/* bring SVC out of reset */
	svc_reset_onoff(arche_pdata->svc_reset_gpio,
			!arche_pdata->is_reset_act_hi);

	arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_ACTIVE);

	return 0;
}
/*
 * Requires arche_pdata->platform_state_mutex to be held
 *
 * Same reset/clock dance as cold boot, but with sysboot=1 so the SVC
 * comes up in firmware-flashing mode. No-op when already FW_FLASHING.
 */
static int arche_platform_fw_flashing_seq(struct arche_platform_drvdata *arche_pdata)
{
	int ret;

	if (arche_pdata->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
		return 0;

	dev_info(arche_pdata->dev, "Switching to FW flashing state\n");

	svc_reset_onoff(arche_pdata->svc_reset_gpio,
			arche_pdata->is_reset_act_hi);

	gpio_set_value(arche_pdata->svc_sysboot_gpio, 1);
	usleep_range(100, 200);

	ret = clk_prepare_enable(arche_pdata->svc_ref_clk);
	if (ret) {
		dev_err(arche_pdata->dev, "failed to enable svc_ref_clk: %d\n",
			ret);
		return ret;
	}

	svc_reset_onoff(arche_pdata->svc_reset_gpio,
			!arche_pdata->is_reset_act_hi);

	arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_FW_FLASHING);

	return 0;
}
/*
 * Requires arche_pdata->platform_state_mutex to be held
 *
 * Power-off sequence: disable the wake/detect IRQ (unless it was already
 * disabled by FW_FLASHING), stop the reference clock and put the SVC back
 * in reset. Moves driver state to OFF. No-op when already OFF.
 */
static void arche_platform_poweroff_seq(struct arche_platform_drvdata *arche_pdata)
{
	unsigned long flags;

	if (arche_pdata->state == ARCHE_PLATFORM_STATE_OFF)
		return;

	/* If in fw_flashing mode, then no need to repeat things again */
	if (arche_pdata->state != ARCHE_PLATFORM_STATE_FW_FLASHING) {
		disable_irq(arche_pdata->wake_detect_irq);

		spin_lock_irqsave(&arche_pdata->wake_lock, flags);
		arche_platform_set_wake_detect_state(arche_pdata,
						     WD_STATE_IDLE);
		spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
	}

	clk_disable_unprepare(arche_pdata->svc_ref_clk);

	/* As part of exit, put APB back in reset state */
	svc_reset_onoff(arche_pdata->svc_reset_gpio,
			arche_pdata->is_reset_act_hi);

	arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_OFF);
}
/*
 * sysfs "state" store: accepts "off", "active", "standby" (unsupported)
 * and "fw_flashing". If a timesync operation is in flight, block
 * (interruptibly) until it finishes, then retake the lock and retry.
 */
static ssize_t state_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct arche_platform_drvdata *arche_pdata = platform_get_drvdata(pdev);
	int ret = 0;

retry:
	mutex_lock(&arche_pdata->platform_state_mutex);
	if (arche_pdata->state == ARCHE_PLATFORM_STATE_TIME_SYNC) {
		/* Drop the lock while waiting so timesync can complete */
		mutex_unlock(&arche_pdata->platform_state_mutex);
		ret = wait_event_interruptible(
			arche_pdata->wq,
			arche_pdata->state != ARCHE_PLATFORM_STATE_TIME_SYNC);
		if (ret)
			return ret;
		goto retry;
	}

	if (sysfs_streq(buf, "off")) {
		if (arche_pdata->state == ARCHE_PLATFORM_STATE_OFF)
			goto exit;

		/*  If SVC goes down, bring down APB's as well */
		device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);

		arche_platform_poweroff_seq(arche_pdata);

	} else if (sysfs_streq(buf, "active")) {
		if (arche_pdata->state == ARCHE_PLATFORM_STATE_ACTIVE)
			goto exit;

		/* First we want to make sure we power off everything
		 * and then activate back again */
		device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);
		arche_platform_poweroff_seq(arche_pdata);

		arche_platform_wd_irq_en(arche_pdata);
		ret = arche_platform_coldboot_seq(arche_pdata);
		if (ret)
			goto exit;

	} else if (sysfs_streq(buf, "standby")) {
		if (arche_pdata->state == ARCHE_PLATFORM_STATE_STANDBY)
			goto exit;

		dev_warn(arche_pdata->dev, "standby state not supported\n");
	} else if (sysfs_streq(buf, "fw_flashing")) {
		if (arche_pdata->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
			goto exit;

		/*
		 * Here we only control SVC.
		 *
		 * In case of FW_FLASHING mode we do not want to control
		 * APBs, as in case of V2, SPI bus is shared between both
		 * the APBs. So let user chose which APB he wants to flash.
		 */
		arche_platform_poweroff_seq(arche_pdata);

		ret = arche_platform_fw_flashing_seq(arche_pdata);
		if (ret)
			goto exit;
	} else {
		dev_err(arche_pdata->dev, "unknown state\n");
		ret = -EINVAL;
	}

exit:
	mutex_unlock(&arche_pdata->platform_state_mutex);
	return ret ? ret : count;
}
/* sysfs "state" show: report the current platform state as a string. */
static ssize_t state_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct arche_platform_drvdata *arche_pdata = dev_get_drvdata(dev);
	const char *name;

	switch (arche_pdata->state) {
	case ARCHE_PLATFORM_STATE_OFF:
		name = "off\n";
		break;
	case ARCHE_PLATFORM_STATE_ACTIVE:
		name = "active\n";
		break;
	case ARCHE_PLATFORM_STATE_STANDBY:
		name = "standby\n";
		break;
	case ARCHE_PLATFORM_STATE_FW_FLASHING:
		name = "fw_flashing\n";
		break;
	case ARCHE_PLATFORM_STATE_TIME_SYNC:
		name = "time_sync\n";
		break;
	default:
		name = "unknown state\n";
		break;
	}

	return sprintf(buf, "%s", name);
}
static DEVICE_ATTR_RW(state);
/*
 * PM notifier: veto suspend unless the platform is ACTIVE; power the
 * SVC and APBs down before suspend and cold-boot them again on resume.
 */
static int arche_platform_pm_notifier(struct notifier_block *notifier,
				      unsigned long pm_event, void *unused)
{
	struct arche_platform_drvdata *arche_pdata =
		container_of(notifier, struct arche_platform_drvdata,
			     pm_notifier);
	int ret = NOTIFY_DONE;

	mutex_lock(&arche_pdata->platform_state_mutex);
	switch (pm_event) {
	case PM_SUSPEND_PREPARE:
		if (arche_pdata->state != ARCHE_PLATFORM_STATE_ACTIVE) {
			ret = NOTIFY_STOP;
			break;
		}
		device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);
		arche_platform_poweroff_seq(arche_pdata);
		break;
	case PM_POST_SUSPEND:
		if (arche_pdata->state != ARCHE_PLATFORM_STATE_OFF)
			break;

		arche_platform_wd_irq_en(arche_pdata);
		arche_platform_coldboot_seq(arche_pdata);
		break;
	default:
		break;
	}
	mutex_unlock(&arche_pdata->platform_state_mutex);

	return ret;
}
static int arche_platform_probe(struct platform_device *pdev)
{
struct arche_platform_drvdata *arche_pdata;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
int ret;
arche_pdata = devm_kzalloc(&pdev->dev, sizeof(*arche_pdata), GFP_KERNEL);
if (!arche_pdata)
return -ENOMEM;
/* setup svc reset gpio */
arche_pdata->is_reset_act_hi = of_property_read_bool(np,
"svc,reset-active-high");
arche_pdata->svc_reset_gpio = of_get_named_gpio(np, "svc,reset-gpio", 0);
if (arche_pdata->svc_reset_gpio < 0) {
dev_err(dev, "failed to get reset-gpio\n");
return arche_pdata->svc_reset_gpio;
}
ret = devm_gpio_request(dev, arche_pdata->svc_reset_gpio, "svc-reset");
if (ret) {
dev_err(dev, "failed to request svc-reset gpio:%d\n", ret);
return ret;
}
ret = gpio_direction_output(arche_pdata->svc_reset_gpio,
arche_pdata->is_reset_act_hi);
if (ret) {
dev_err(dev, "failed to set svc-reset gpio dir:%d\n", ret);
return ret;
}
arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_OFF);
arche_pdata->svc_sysboot_gpio = of_get_named_gpio(np,
"svc,sysboot-gpio", 0);
if (arche_pdata->svc_sysboot_gpio < 0) {
dev_err(dev, "failed to get sysboot gpio\n");
return arche_pdata->svc_sysboot_gpio;
}
ret = devm_gpio_request(dev, arche_pdata->svc_sysboot_gpio, "sysboot0");
if (ret) {
dev_err(dev, "failed to request sysboot0 gpio:%d\n", ret);
return ret;
}
ret = gpio_direction_output(arche_pdata->svc_sysboot_gpio, 0);
if (ret) {
dev_err(dev, "failed to set svc-reset gpio dir:%d\n", ret);
return ret;
}
/* setup the clock request gpio first */
arche_pdata->svc_refclk_req = of_get_named_gpio(np,
"svc,refclk-req-gpio", 0);
if (arche_pdata->svc_refclk_req < 0) {
dev_err(dev, "failed to get svc clock-req gpio\n");
return arche_pdata->svc_refclk_req;
}
ret = devm_gpio_request(dev, arche_pdata->svc_refclk_req, "svc-clk-req");
if (ret) {
dev_err(dev, "failed to request svc-clk-req gpio: %d\n", ret);
return ret;
}
ret = gpio_direction_input(arche_pdata->svc_refclk_req);
if (ret) {
dev_err(dev, "failed to set svc-clk-req gpio dir :%d\n", ret);
return ret;
}
/* setup refclk2 to follow the pin */
arche_pdata->svc_ref_clk = devm_clk_get(dev, "svc_ref_clk");
if (IS_ERR(arche_pdata->svc_ref_clk)) {
ret = PTR_ERR(arche_pdata->svc_ref_clk);
dev_err(dev, "failed to get svc_ref_clk: %d\n", ret);
return ret;
}
platform_set_drvdata(pdev, arche_pdata);
arche_pdata->num_apbs = of_get_child_count(np);
dev_dbg(dev, "Number of APB's available - %d\n", arche_pdata->num_apbs);
arche_pdata->wake_detect_gpio = of_get_named_gpio(np, "svc,wake-detect-gpio", 0);
if (arche_pdata->wake_detect_gpio < 0) {
dev_err(dev, "failed to get wake detect gpio\n");
ret = arche_pdata->wake_detect_gpio;
return ret;
}
ret = devm_gpio_request(dev, arche_pdata->wake_detect_gpio, "wake detect");
if (ret) {
dev_err(dev, "Failed requesting wake_detect gpio %d\n",
arche_pdata->wake_detect_gpio);
return ret;
}
arche_platform_set_wake_detect_state(arche_pdata, WD_STATE_IDLE);
arche_pdata->dev = &pdev->dev;
spin_lock_init(&arche_pdata->wake_lock);
mutex_init(&arche_pdata->platform_state_mutex);
init_waitqueue_head(&arche_pdata->wq);
arche_pdata->wake_detect_irq =
gpio_to_irq(arche_pdata->wake_detect_gpio);
ret = devm_request_threaded_irq(dev, arche_pdata->wake_detect_irq,
arche_platform_wd_irq,
arche_platform_wd_irq_thread,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT,
dev_name(dev), arche_pdata);
if (ret) {
dev_err(dev, "failed to request wake detect IRQ %d\n", ret);
return ret;
}
disable_irq(arche_pdata->wake_detect_irq);
ret = device_create_file(dev, &dev_attr_state);
if (ret) {
dev_err(dev, "failed to create state file in sysfs\n");
return ret;
}
ret = of_platform_populate(np, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to populate child nodes %d\n", ret);
goto err_device_remove;
}
arche_pdata->pm_notifier.notifier_call = arche_platform_pm_notifier;
ret = register_pm_notifier(&arche_pdata->pm_notifier);
if (ret) {
dev_err(dev, "failed to register pm notifier %d\n", ret);
goto err_device_remove;
}
/* Register callback pointer */
arche_platform_change_state_cb = arche_platform_change_state;
/* Explicitly power off if requested */
if (!of_property_read_bool(pdev->dev.of_node, "arche,init-off")) {
mutex_lock(&arche_pdata->platform_state_mutex);
ret = arche_platform_coldboot_seq(arche_pdata);
if (ret) {
dev_err(dev, "Failed to cold boot svc %d\n", ret);
goto err_coldboot;
}
arche_platform_wd_irq_en(arche_pdata);
mutex_unlock(&arche_pdata->platform_state_mutex);
}
dev_info(dev, "Device registered successfully\n");
return 0;
err_coldboot:
mutex_unlock(&arche_pdata->platform_state_mutex);
err_device_remove:
device_remove_file(&pdev->dev, &dev_attr_state);
return ret;
}
/* device_for_each_child() callback: unregister one APB child device. */
static int arche_remove_child(struct device *dev, void *unused)
{
	platform_device_unregister(to_platform_device(dev));

	return 0;
}
/* Undo probe: drop notifier/sysfs, remove children and power off the SVC. */
static int arche_platform_remove(struct platform_device *pdev)
{
	struct arche_platform_drvdata *arche_pdata = platform_get_drvdata(pdev);

	unregister_pm_notifier(&arche_pdata->pm_notifier);
	device_remove_file(&pdev->dev, &dev_attr_state);
	device_for_each_child(&pdev->dev, NULL, arche_remove_child);
	arche_platform_poweroff_seq(arche_pdata);
	platform_set_drvdata(pdev, NULL);

	if (usb3613_hub_mode_ctrl(false))
		dev_warn(arche_pdata->dev, "failed to control hub device\n");
		/* TODO: Should we do anything more here ?? */
	return 0;
}
/* System-suspend hook: currently a no-op placeholder (see TODOs below). */
static int arche_platform_suspend(struct device *dev)
{
	/*
	 * If timing profile permits, we may shutdown bridge
	 * completely
	 *
	 * TODO: sequence ??
	 *
	 * Also, need to make sure we meet precondition for unipro suspend
	 * Precondition: Definition ???
	 */
	return 0;
}
/* System-resume hook: currently a no-op placeholder (see notes below). */
static int arche_platform_resume(struct device *dev)
{
	/*
	 * At least for ES2 we have to meet the delay requirement between
	 * unipro switch and AP bridge init, depending on whether bridge is in
	 * OFF state or standby state.
	 *
	 * Based on whether bridge is in standby or OFF state we may have to
	 * assert multiple signals. Please refer to WDM spec, for more info.
	 *
	 */
	return 0;
}
/* Power the SVC off and return the USB hub to its default mode on shutdown. */
static void arche_platform_shutdown(struct platform_device *pdev)
{
	struct arche_platform_drvdata *arche_pdata = platform_get_drvdata(pdev);

	arche_platform_poweroff_seq(arche_pdata);

	usb3613_hub_mode_ctrl(false);
}
static SIMPLE_DEV_PM_OPS(arche_platform_pm_ops,
arche_platform_suspend,
arche_platform_resume);
/* DT match table; const per kernel convention for of_device_id tables. */
static const struct of_device_id arche_platform_of_match[] = {
	{ .compatible = "google,arche-platform", }, /* Use PID/VID of SVC device */
	{ },
};
/*
 * Combined alias table covering both the SVC and APB compatibles so the
 * module autoloads for either. const per kernel convention.
 */
static const struct of_device_id arche_combined_id[] = {
	{ .compatible = "google,arche-platform", }, /* Use PID/VID of SVC device */
	{ .compatible = "usbffff,2", },
	{ },
};
MODULE_DEVICE_TABLE(of, arche_combined_id);
/* Platform driver glue for the SVC control device. */
static struct platform_driver arche_platform_device_driver = {
	.probe		= arche_platform_probe,
	.remove		= arche_platform_remove,
	.shutdown	= arche_platform_shutdown,
	.driver		= {
		.name	= "arche-platform-ctrl",
		.pm	= &arche_platform_pm_ops,
		.of_match_table = arche_platform_of_match,
	}
};
/*
 * Module init: register the platform driver, then the APB control
 * driver; unwind the former if the latter fails.
 */
static int __init arche_init(void)
{
	int ret = platform_driver_register(&arche_platform_device_driver);

	if (ret)
		return ret;

	ret = arche_apb_init();
	if (ret)
		platform_driver_unregister(&arche_platform_device_driver);

	return ret;
}
module_init(arche_init);
/* Module exit: unregister drivers in reverse order of registration. */
static void __exit arche_exit(void)
{
	arche_apb_exit();
	platform_driver_unregister(&arche_platform_device_driver);
}
module_exit(arche_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vaibhav Hiremath <vaibhav.hiremath@linaro.org>");
MODULE_DESCRIPTION("Arche Platform Driver");

View File

@ -0,0 +1,39 @@
/*
* Arche Platform driver to enable Unipro link.
*
* Copyright 2015-2016 Google Inc.
* Copyright 2015-2016 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#ifndef __ARCHE_PLATFORM_H
#define __ARCHE_PLATFORM_H
#include "timesync.h"
/* Operational states of the arche platform (SVC side). */
enum arche_platform_state {
	ARCHE_PLATFORM_STATE_OFF,
	ARCHE_PLATFORM_STATE_ACTIVE,
	ARCHE_PLATFORM_STATE_STANDBY,
	ARCHE_PLATFORM_STATE_FW_FLASHING,
	ARCHE_PLATFORM_STATE_TIME_SYNC,
};

/* Request an SVC state transition (see arche-platform.c for constraints) */
int arche_platform_change_state(enum arche_platform_state state,
				struct gb_timesync_svc *pdata);

/* Callback pointer installed at probe time for out-of-module callers */
extern int (*arche_platform_change_state_cb)(enum arche_platform_state state,
					     struct gb_timesync_svc *pdata);
int __init arche_apb_init(void);
void __exit arche_apb_exit(void);

/* Operational states for the APB device */
int apb_ctrl_coldboot(struct device *dev);
int apb_ctrl_fw_flashing(struct device *dev);
int apb_ctrl_standby_boot(struct device *dev);
void apb_ctrl_poweroff(struct device *dev);
void apb_bootret_assert(struct device *dev);
void apb_bootret_deassert(struct device *dev);
#endif /* __ARCHE_PLATFORM_H */

View File

@ -0,0 +1,109 @@
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2016 Google Inc. All rights reserved.
* Copyright(c) 2016 Linaro Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details.
*
* BSD LICENSE
*
* Copyright(c) 2016 Google Inc. All rights reserved.
* Copyright(c) 2016 Linaro Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. or Linaro Ltd. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
* LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __ARPC_H
#define __ARPC_H
/* APBridgeA RPC (ARPC) */
/* Result codes carried in struct arpc_response_message.result */
enum arpc_result {
	ARPC_SUCCESS		= 0x00,
	ARPC_NO_MEMORY		= 0x01,
	ARPC_INVALID		= 0x02,
	ARPC_TIMEOUT		= 0x03,
	ARPC_UNKNOWN_ERROR	= 0xff,
};

/* Wire header for an ARPC request; followed by a type-specific payload */
struct arpc_request_message {
	__le16	id;		/* RPC unique id */
	__le16	size;		/* Size in bytes of header + payload */
	__u8	type;		/* RPC type */
	__u8	data[0];	/* ARPC data */
} __packed;

/* Wire format of an ARPC response */
struct arpc_response_message {
	__le16	id;		/* RPC unique id */
	__u8	result;		/* Result of RPC */
} __packed;

/* ARPC requests */
#define ARPC_TYPE_CPORT_CONNECTED		0x01
#define ARPC_TYPE_CPORT_QUIESCE			0x02
#define ARPC_TYPE_CPORT_CLEAR			0x03
#define ARPC_TYPE_CPORT_FLUSH			0x04
#define ARPC_TYPE_CPORT_SHUTDOWN		0x05

/* Payloads for the request types above; all fields little-endian */
struct arpc_cport_connected_req {
	__le16 cport_id;
} __packed;

struct arpc_cport_quiesce_req {
	__le16 cport_id;
	__le16 peer_space;
	__le16 timeout;
} __packed;

struct arpc_cport_clear_req {
	__le16 cport_id;
} __packed;

struct arpc_cport_flush_req {
	__le16 cport_id;
} __packed;

struct arpc_cport_shutdown_req {
	__le16 cport_id;
	__le16 timeout;
	__u8 phase;
} __packed;
#endif /* __ARPC_H */

View File

@ -0,0 +1,207 @@
/*
* Greybus Audio Device Class Protocol helpers
*
* Copyright 2015-2016 Google Inc.
*
* Released under the GPLv2 only.
*/
#include "greybus.h"
#include "greybus_protocols.h"
#include "audio_apbridgea.h"
#include "audio_codec.h"
/* Configure the APBridgeA I2S port: format, sample rate and MCLK frequency. */
int gb_audio_apbridgea_set_config(struct gb_connection *connection,
				  __u16 i2s_port, __u32 format, __u32 rate,
				  __u32 mclk_freq)
{
	struct audio_apbridgea_set_config_request req = {
		.hdr = {
			.type = AUDIO_APBRIDGEA_TYPE_SET_CONFIG,
			.i2s_port = cpu_to_le16(i2s_port),
		},
		.format = cpu_to_le32(format),
		.rate = cpu_to_le32(rate),
		.mclk_freq = cpu_to_le32(mclk_freq),
	};

	return gb_hd_output(connection->hd, &req, sizeof(req),
			    GB_APB_REQUEST_AUDIO_CONTROL, true);
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_set_config);
/* Bind a cport to the I2S port; keeps the bundle runtime-active while bound. */
int gb_audio_apbridgea_register_cport(struct gb_connection *connection,
				      __u16 i2s_port, __u16 cportid,
				      __u8 direction)
{
	int ret;
	struct audio_apbridgea_register_cport_request req = {
		.hdr = {
			.type = AUDIO_APBRIDGEA_TYPE_REGISTER_CPORT,
			.i2s_port = cpu_to_le16(i2s_port),
		},
		.cport = cpu_to_le16(cportid),
		.direction = direction,
	};

	/* Take a runtime PM reference; dropped in unregister_cport() */
	ret = gb_pm_runtime_get_sync(connection->bundle);
	if (ret)
		return ret;

	return gb_hd_output(connection->hd, &req, sizeof(req),
			    GB_APB_REQUEST_AUDIO_CONTROL, true);
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_register_cport);
/* Unbind a cport from the I2S port and drop the runtime PM reference. */
int gb_audio_apbridgea_unregister_cport(struct gb_connection *connection,
					__u16 i2s_port, __u16 cportid,
					__u8 direction)
{
	int ret;
	struct audio_apbridgea_unregister_cport_request req = {
		.hdr = {
			.type = AUDIO_APBRIDGEA_TYPE_UNREGISTER_CPORT,
			.i2s_port = cpu_to_le16(i2s_port),
		},
		.cport = cpu_to_le16(cportid),
		.direction = direction,
	};

	ret = gb_hd_output(connection->hd, &req, sizeof(req),
			   GB_APB_REQUEST_AUDIO_CONTROL, true);

	/* Drop the reference taken by register_cport() even on failure */
	gb_pm_runtime_put_autosuspend(connection->bundle);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_unregister_cport);
/* Set the per-message TX data size for the I2S port. */
int gb_audio_apbridgea_set_tx_data_size(struct gb_connection *connection,
					__u16 i2s_port, __u16 size)
{
	struct audio_apbridgea_set_tx_data_size_request req = {
		.hdr = {
			.type = AUDIO_APBRIDGEA_TYPE_SET_TX_DATA_SIZE,
			.i2s_port = cpu_to_le16(i2s_port),
		},
		.size = cpu_to_le16(size),
	};

	return gb_hd_output(connection->hd, &req, sizeof(req),
			    GB_APB_REQUEST_AUDIO_CONTROL, true);
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_set_tx_data_size);
/* Prepare the I2S port for transmission. */
int gb_audio_apbridgea_prepare_tx(struct gb_connection *connection,
				  __u16 i2s_port)
{
	struct audio_apbridgea_prepare_tx_request req = {
		.hdr = {
			.type = AUDIO_APBRIDGEA_TYPE_PREPARE_TX,
			.i2s_port = cpu_to_le16(i2s_port),
		},
	};

	return gb_hd_output(connection->hd, &req, sizeof(req),
			    GB_APB_REQUEST_AUDIO_CONTROL, true);
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_prepare_tx);
/* Start transmission on the I2S port at the given timestamp. */
int gb_audio_apbridgea_start_tx(struct gb_connection *connection,
				__u16 i2s_port, __u64 timestamp)
{
	struct audio_apbridgea_start_tx_request req = {
		.hdr = {
			.type = AUDIO_APBRIDGEA_TYPE_START_TX,
			.i2s_port = cpu_to_le16(i2s_port),
		},
		.timestamp = cpu_to_le64(timestamp),
	};

	return gb_hd_output(connection->hd, &req, sizeof(req),
			    GB_APB_REQUEST_AUDIO_CONTROL, true);
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_start_tx);
/* Send the header-only STOP_TX request for @i2s_port. */
int gb_audio_apbridgea_stop_tx(struct gb_connection *connection, __u16 i2s_port)
{
	struct audio_apbridgea_stop_tx_request req = {
		.hdr = {
			.type = AUDIO_APBRIDGEA_TYPE_STOP_TX,
			.i2s_port = cpu_to_le16(i2s_port),
		},
	};

	return gb_hd_output(connection->hd, &req, sizeof(req),
			    GB_APB_REQUEST_AUDIO_CONTROL, true);
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_stop_tx);
/* Send the header-only SHUTDOWN_TX request for @i2s_port. */
int gb_audio_apbridgea_shutdown_tx(struct gb_connection *connection,
				   __u16 i2s_port)
{
	struct audio_apbridgea_shutdown_tx_request req = {
		.hdr = {
			.type = AUDIO_APBRIDGEA_TYPE_SHUTDOWN_TX,
			.i2s_port = cpu_to_le16(i2s_port),
		},
	};

	return gb_hd_output(connection->hd, &req, sizeof(req),
			    GB_APB_REQUEST_AUDIO_CONTROL, true);
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_shutdown_tx);
/* Set the RX data size for @i2s_port on APBridgeA. */
int gb_audio_apbridgea_set_rx_data_size(struct gb_connection *connection,
					__u16 i2s_port, __u16 size)
{
	struct audio_apbridgea_set_rx_data_size_request req = {
		.hdr = {
			.type = AUDIO_APBRIDGEA_TYPE_SET_RX_DATA_SIZE,
			.i2s_port = cpu_to_le16(i2s_port),
		},
		.size = cpu_to_le16(size),
	};

	return gb_hd_output(connection->hd, &req, sizeof(req),
			    GB_APB_REQUEST_AUDIO_CONTROL, true);
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_set_rx_data_size);
/* Send the header-only PREPARE_RX request for @i2s_port. */
int gb_audio_apbridgea_prepare_rx(struct gb_connection *connection,
				  __u16 i2s_port)
{
	struct audio_apbridgea_prepare_rx_request req = {
		.hdr = {
			.type = AUDIO_APBRIDGEA_TYPE_PREPARE_RX,
			.i2s_port = cpu_to_le16(i2s_port),
		},
	};

	return gb_hd_output(connection->hd, &req, sizeof(req),
			    GB_APB_REQUEST_AUDIO_CONTROL, true);
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_prepare_rx);
/* Send the header-only START_RX request for @i2s_port. */
int gb_audio_apbridgea_start_rx(struct gb_connection *connection,
				__u16 i2s_port)
{
	struct audio_apbridgea_start_rx_request req = {
		.hdr = {
			.type = AUDIO_APBRIDGEA_TYPE_START_RX,
			.i2s_port = cpu_to_le16(i2s_port),
		},
	};

	return gb_hd_output(connection->hd, &req, sizeof(req),
			    GB_APB_REQUEST_AUDIO_CONTROL, true);
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_start_rx);
/* Send the header-only STOP_RX request for @i2s_port. */
int gb_audio_apbridgea_stop_rx(struct gb_connection *connection, __u16 i2s_port)
{
	struct audio_apbridgea_stop_rx_request req = {
		.hdr = {
			.type = AUDIO_APBRIDGEA_TYPE_STOP_RX,
			.i2s_port = cpu_to_le16(i2s_port),
		},
	};

	return gb_hd_output(connection->hd, &req, sizeof(req),
			    GB_APB_REQUEST_AUDIO_CONTROL, true);
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_stop_rx);
/* Send the header-only SHUTDOWN_RX request for @i2s_port. */
int gb_audio_apbridgea_shutdown_rx(struct gb_connection *connection,
				   __u16 i2s_port)
{
	struct audio_apbridgea_shutdown_rx_request req = {
		.hdr = {
			.type = AUDIO_APBRIDGEA_TYPE_SHUTDOWN_RX,
			.i2s_port = cpu_to_le16(i2s_port),
		},
	};

	return gb_hd_output(connection->hd, &req, sizeof(req),
			    GB_APB_REQUEST_AUDIO_CONTROL, true);
}
EXPORT_SYMBOL_GPL(gb_audio_apbridgea_shutdown_rx);
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("greybus:audio-apbridgea");
MODULE_DESCRIPTION("Greybus Special APBridgeA Audio Protocol library");
MODULE_AUTHOR("Mark Greer <mgreer@animalcreek.com>");

View File

@ -0,0 +1,156 @@
/**
* Copyright (c) 2015-2016 Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This is a special protocol for configuring communication over the
* I2S bus between the DSP on the MSM8994 and APBridgeA. Therefore,
* we can predefine several low-level attributes of the communication
* because we know that they are supported. In particular, the following
* assumptions are made:
* - there are two channels (i.e., stereo)
* - the low-level protocol is I2S as defined by Philips/NXP
* - the DSP on the MSM8994 is the clock master for MCLK, BCLK, and WCLK
* - WCLK changes on the falling edge of BCLK
* - WCLK low for left channel; high for right channel
* - TX data is sent on the falling edge of BCLK
* - RX data is received/latched on the rising edge of BCLK
*/
#ifndef __AUDIO_APBRIDGEA_H
#define __AUDIO_APBRIDGEA_H
#define AUDIO_APBRIDGEA_TYPE_SET_CONFIG 0x01
#define AUDIO_APBRIDGEA_TYPE_REGISTER_CPORT 0x02
#define AUDIO_APBRIDGEA_TYPE_UNREGISTER_CPORT 0x03
#define AUDIO_APBRIDGEA_TYPE_SET_TX_DATA_SIZE 0x04
/* 0x05 unused */
#define AUDIO_APBRIDGEA_TYPE_PREPARE_TX 0x06
#define AUDIO_APBRIDGEA_TYPE_START_TX 0x07
#define AUDIO_APBRIDGEA_TYPE_STOP_TX 0x08
#define AUDIO_APBRIDGEA_TYPE_SHUTDOWN_TX 0x09
#define AUDIO_APBRIDGEA_TYPE_SET_RX_DATA_SIZE 0x0a
/* 0x0b unused */
#define AUDIO_APBRIDGEA_TYPE_PREPARE_RX 0x0c
#define AUDIO_APBRIDGEA_TYPE_START_RX 0x0d
#define AUDIO_APBRIDGEA_TYPE_STOP_RX 0x0e
#define AUDIO_APBRIDGEA_TYPE_SHUTDOWN_RX 0x0f
#define AUDIO_APBRIDGEA_PCM_FMT_8 BIT(0)
#define AUDIO_APBRIDGEA_PCM_FMT_16 BIT(1)
#define AUDIO_APBRIDGEA_PCM_FMT_24 BIT(2)
#define AUDIO_APBRIDGEA_PCM_FMT_32 BIT(3)
#define AUDIO_APBRIDGEA_PCM_FMT_64 BIT(4)
#define AUDIO_APBRIDGEA_PCM_RATE_5512 BIT(0)
#define AUDIO_APBRIDGEA_PCM_RATE_8000 BIT(1)
#define AUDIO_APBRIDGEA_PCM_RATE_11025 BIT(2)
#define AUDIO_APBRIDGEA_PCM_RATE_16000 BIT(3)
#define AUDIO_APBRIDGEA_PCM_RATE_22050 BIT(4)
#define AUDIO_APBRIDGEA_PCM_RATE_32000 BIT(5)
#define AUDIO_APBRIDGEA_PCM_RATE_44100 BIT(6)
#define AUDIO_APBRIDGEA_PCM_RATE_48000 BIT(7)
#define AUDIO_APBRIDGEA_PCM_RATE_64000 BIT(8)
#define AUDIO_APBRIDGEA_PCM_RATE_88200 BIT(9)
#define AUDIO_APBRIDGEA_PCM_RATE_96000 BIT(10)
#define AUDIO_APBRIDGEA_PCM_RATE_176400 BIT(11)
#define AUDIO_APBRIDGEA_PCM_RATE_192000 BIT(12)
#define AUDIO_APBRIDGEA_DIRECTION_TX BIT(0)
#define AUDIO_APBRIDGEA_DIRECTION_RX BIT(1)
/* The I2S port is passed in the 'index' parameter of the USB request */
/* The CPort is passed in the 'value' parameter of the USB request */
/*
 * Common header prepended to every APBridgeA audio request.
 *
 * NOTE: 'data' must stay a zero-length array rather than a C99 flexible
 * array member, because this struct is embedded as the first member of
 * the request structures below (a struct containing a flexible array
 * member may not be embedded in another struct).
 */
struct audio_apbridgea_hdr {
	__u8	type;		/* AUDIO_APBRIDGEA_TYPE_* */
	__le16	i2s_port;	/* destination I2S port, little-endian */
	__u8	data[0];	/* type-specific payload follows the header */
} __packed;
/* AUDIO_APBRIDGEA_TYPE_SET_CONFIG: set I2S format, rate and MCLK. */
struct audio_apbridgea_set_config_request {
	struct audio_apbridgea_hdr	hdr;
	__le32				format;	/* AUDIO_APBRIDGEA_PCM_FMT_* */
	__le32				rate;	/* AUDIO_APBRIDGEA_PCM_RATE_* */
	__le32				mclk_freq; /* XXX Remove? */
} __packed;

/* AUDIO_APBRIDGEA_TYPE_REGISTER_CPORT: bind a CPort to the I2S port. */
struct audio_apbridgea_register_cport_request {
	struct audio_apbridgea_hdr hdr;
	__le16 cport;
	__u8 direction;		/* AUDIO_APBRIDGEA_DIRECTION_* */
} __packed;

/* AUDIO_APBRIDGEA_TYPE_UNREGISTER_CPORT: unbind a CPort. */
struct audio_apbridgea_unregister_cport_request {
	struct audio_apbridgea_hdr hdr;
	__le16 cport;
	__u8 direction;		/* AUDIO_APBRIDGEA_DIRECTION_* */
} __packed;

/* AUDIO_APBRIDGEA_TYPE_SET_TX_DATA_SIZE */
struct audio_apbridgea_set_tx_data_size_request {
	struct audio_apbridgea_hdr hdr;
	__le16 size;
} __packed;

/* AUDIO_APBRIDGEA_TYPE_PREPARE_TX: header-only request. */
struct audio_apbridgea_prepare_tx_request {
	struct audio_apbridgea_hdr hdr;
} __packed;

/* AUDIO_APBRIDGEA_TYPE_START_TX: start transmit at @timestamp. */
struct audio_apbridgea_start_tx_request {
	struct audio_apbridgea_hdr hdr;
	__le64 timestamp;
} __packed;

/* AUDIO_APBRIDGEA_TYPE_STOP_TX: header-only request. */
struct audio_apbridgea_stop_tx_request {
	struct audio_apbridgea_hdr hdr;
} __packed;

/* AUDIO_APBRIDGEA_TYPE_SHUTDOWN_TX: header-only request. */
struct audio_apbridgea_shutdown_tx_request {
	struct audio_apbridgea_hdr hdr;
} __packed;

/* AUDIO_APBRIDGEA_TYPE_SET_RX_DATA_SIZE */
struct audio_apbridgea_set_rx_data_size_request {
	struct audio_apbridgea_hdr hdr;
	__le16 size;
} __packed;

/* AUDIO_APBRIDGEA_TYPE_PREPARE_RX: header-only request. */
struct audio_apbridgea_prepare_rx_request {
	struct audio_apbridgea_hdr hdr;
} __packed;

/* AUDIO_APBRIDGEA_TYPE_START_RX: header-only request. */
struct audio_apbridgea_start_rx_request {
	struct audio_apbridgea_hdr hdr;
} __packed;

/* AUDIO_APBRIDGEA_TYPE_STOP_RX: header-only request. */
struct audio_apbridgea_stop_rx_request {
	struct audio_apbridgea_hdr hdr;
} __packed;

/* AUDIO_APBRIDGEA_TYPE_SHUTDOWN_RX: header-only request. */
struct audio_apbridgea_shutdown_rx_request {
	struct audio_apbridgea_hdr hdr;
} __packed;
#endif /*__AUDIO_APBRIDGEA_H */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,283 @@
/*
* Greybus audio driver
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#ifndef __LINUX_GBAUDIO_CODEC_H
#define __LINUX_GBAUDIO_CODEC_H
#include <sound/soc.h>
#include <sound/jack.h>
#include "greybus.h"
#include "greybus_protocols.h"
#define NAME_SIZE 32
#define MAX_DAIS 2 /* APB1, APB2 */
enum {
APB1_PCM = 0,
APB2_PCM,
NUM_CODEC_DAIS,
};
enum gbcodec_reg_index {
GBCODEC_CTL_REG,
GBCODEC_MUTE_REG,
GBCODEC_PB_LVOL_REG,
GBCODEC_PB_RVOL_REG,
GBCODEC_CAP_LVOL_REG,
GBCODEC_CAP_RVOL_REG,
GBCODEC_APB1_MUX_REG,
GBCODEC_APB2_MUX_REG,
GBCODEC_REG_COUNT
};
/* device_type should be same as defined in audio.h (Android media layer) */
enum {
GBAUDIO_DEVICE_NONE = 0x0,
/* reserved bits */
GBAUDIO_DEVICE_BIT_IN = 0x80000000,
GBAUDIO_DEVICE_BIT_DEFAULT = 0x40000000,
/* output devices */
GBAUDIO_DEVICE_OUT_SPEAKER = 0x2,
GBAUDIO_DEVICE_OUT_WIRED_HEADSET = 0x4,
GBAUDIO_DEVICE_OUT_WIRED_HEADPHONE = 0x8,
/* input devices */
GBAUDIO_DEVICE_IN_BUILTIN_MIC = GBAUDIO_DEVICE_BIT_IN | 0x4,
GBAUDIO_DEVICE_IN_WIRED_HEADSET = GBAUDIO_DEVICE_BIT_IN | 0x10,
};
/* bit 0-SPK, 1-HP, 2-DAC,
* 4-MIC, 5-HSMIC, 6-MIC2
*/
#define GBCODEC_CTL_REG_DEFAULT 0x00
/* bit 0,1 - APB1-PB-L/R
* bit 2,3 - APB2-PB-L/R
* bit 4,5 - APB1-Cap-L/R
* bit 6,7 - APB2-Cap-L/R
*/
#define GBCODEC_MUTE_REG_DEFAULT 0x00
/* 0-127 steps */
#define GBCODEC_PB_VOL_REG_DEFAULT 0x00
#define GBCODEC_CAP_VOL_REG_DEFAULT 0x00
/* bit 0,1,2 - PB stereo, left, right
* bit 8,9,10 - Cap stereo, left, right
*/
#define GBCODEC_APB1_MUX_REG_DEFAULT 0x00
#define GBCODEC_APB2_MUX_REG_DEFAULT 0x00
#define GBCODEC_JACK_MASK 0x0000FFFF
#define GBCODEC_JACK_BUTTON_MASK 0xFFFF0000
static const u8 gbcodec_reg_defaults[GBCODEC_REG_COUNT] = {
GBCODEC_CTL_REG_DEFAULT,
GBCODEC_MUTE_REG_DEFAULT,
GBCODEC_PB_VOL_REG_DEFAULT,
GBCODEC_PB_VOL_REG_DEFAULT,
GBCODEC_CAP_VOL_REG_DEFAULT,
GBCODEC_CAP_VOL_REG_DEFAULT,
GBCODEC_APB1_MUX_REG_DEFAULT,
GBCODEC_APB2_MUX_REG_DEFAULT,
};
enum gbaudio_codec_state {
GBAUDIO_CODEC_SHUTDOWN = 0,
GBAUDIO_CODEC_STARTUP,
GBAUDIO_CODEC_HWPARAMS,
GBAUDIO_CODEC_PREPARE,
GBAUDIO_CODEC_START,
GBAUDIO_CODEC_STOP,
};
/* Runtime parameters for one direction (playback or capture) of a stream. */
struct gbaudio_stream_params {
	int state;	/* presumably enum gbaudio_codec_state — confirm */
	uint8_t sig_bits, channels;
	uint32_t format, rate;
};

/* Per-DAI bookkeeping, linked on gbaudio_codec_info.dai_list. */
struct gbaudio_codec_dai {
	int id;
	/* runtime params for playback/capture streams */
	struct gbaudio_stream_params params[2];
	struct list_head list;
};

/* Top-level state for the greybus audio codec driver. */
struct gbaudio_codec_info {
	struct device *dev;
	struct snd_soc_codec *codec;
	struct list_head module_list;
	/* to maintain runtime stream params for each DAI */
	struct list_head dai_list;
	struct mutex lock;
	/* shadow of the virtual codec registers, indexed by gbcodec_reg_index */
	u8 reg[GBCODEC_REG_COUNT];
};

/* A DAPM widget described by a module's topology data. */
struct gbaudio_widget {
	__u8 id;
	const char *name;
	struct list_head list;
};

/* A control described by a module's topology data. */
struct gbaudio_control {
	__u8 id;
	char *name;
	char *wname;
	const char * const *texts;	/* enumerated item strings, if any */
	int items;
	struct list_head list;
};

/* One audio data connection (CPort) belonging to a module. */
struct gbaudio_data_connection {
	int id;
	__le16 data_cport;
	struct gb_connection *connection;
	struct list_head list;
	/* maintain runtime state for playback/capture stream */
	int state[2];
};
/* stream direction */
#define GB_PLAYBACK BIT(0)
#define GB_CAPTURE BIT(1)
enum gbaudio_module_state {
GBAUDIO_MODULE_OFF = 0,
GBAUDIO_MODULE_ON,
};
struct gbaudio_module_info {
/* module info */
struct device *dev;
int dev_id; /* check if it should be bundle_id/hd_cport_id */
int vid;
int pid;
int slot;
int type;
int set_uevent;
char vstr[NAME_SIZE];
char pstr[NAME_SIZE];
struct list_head list;
/* need to share this info to above user space */
int manager_id;
char name[NAME_SIZE];
unsigned int ip_devices;
unsigned int op_devices;
/* jack related */
char jack_name[NAME_SIZE];
char button_name[NAME_SIZE];
int jack_type;
int jack_mask;
int button_mask;
int button_status;
struct snd_soc_jack headset_jack;
struct snd_soc_jack button_jack;
/* connection info */
struct gb_connection *mgmt_connection;
size_t num_data_connections;
struct list_head data_list;
/* topology related */
int num_dais;
int num_controls;
int num_dapm_widgets;
int num_dapm_routes;
unsigned long dai_offset;
unsigned long widget_offset;
unsigned long control_offset;
unsigned long route_offset;
struct snd_kcontrol_new *controls;
struct snd_soc_dapm_widget *dapm_widgets;
struct snd_soc_dapm_route *dapm_routes;
struct snd_soc_dai_driver *dais;
struct list_head widget_list;
struct list_head ctl_list;
struct list_head widget_ctl_list;
struct gb_audio_topology *topology;
};
int gbaudio_tplg_parse_data(struct gbaudio_module_info *module,
struct gb_audio_topology *tplg_data);
void gbaudio_tplg_release(struct gbaudio_module_info *module);
int gbaudio_module_update(struct gbaudio_codec_info *codec,
struct snd_soc_dapm_widget *w,
struct gbaudio_module_info *module,
int enable);
int gbaudio_register_module(struct gbaudio_module_info *module);
void gbaudio_unregister_module(struct gbaudio_module_info *module);
/* protocol related */
extern int gb_audio_gb_get_topology(struct gb_connection *connection,
struct gb_audio_topology **topology);
extern int gb_audio_gb_get_control(struct gb_connection *connection,
uint8_t control_id, uint8_t index,
struct gb_audio_ctl_elem_value *value);
extern int gb_audio_gb_set_control(struct gb_connection *connection,
uint8_t control_id, uint8_t index,
struct gb_audio_ctl_elem_value *value);
extern int gb_audio_gb_enable_widget(struct gb_connection *connection,
uint8_t widget_id);
extern int gb_audio_gb_disable_widget(struct gb_connection *connection,
uint8_t widget_id);
extern int gb_audio_gb_get_pcm(struct gb_connection *connection,
uint16_t data_cport, uint32_t *format,
uint32_t *rate, uint8_t *channels,
uint8_t *sig_bits);
extern int gb_audio_gb_set_pcm(struct gb_connection *connection,
uint16_t data_cport, uint32_t format,
uint32_t rate, uint8_t channels,
uint8_t sig_bits);
extern int gb_audio_gb_set_tx_data_size(struct gb_connection *connection,
uint16_t data_cport, uint16_t size);
extern int gb_audio_gb_activate_tx(struct gb_connection *connection,
uint16_t data_cport);
extern int gb_audio_gb_deactivate_tx(struct gb_connection *connection,
uint16_t data_cport);
extern int gb_audio_gb_set_rx_data_size(struct gb_connection *connection,
uint16_t data_cport, uint16_t size);
extern int gb_audio_gb_activate_rx(struct gb_connection *connection,
uint16_t data_cport);
extern int gb_audio_gb_deactivate_rx(struct gb_connection *connection,
uint16_t data_cport);
extern int gb_audio_apbridgea_set_config(struct gb_connection *connection,
__u16 i2s_port, __u32 format,
__u32 rate, __u32 mclk_freq);
extern int gb_audio_apbridgea_register_cport(struct gb_connection *connection,
__u16 i2s_port, __u16 cportid,
__u8 direction);
extern int gb_audio_apbridgea_unregister_cport(struct gb_connection *connection,
__u16 i2s_port, __u16 cportid,
__u8 direction);
extern int gb_audio_apbridgea_set_tx_data_size(struct gb_connection *connection,
__u16 i2s_port, __u16 size);
extern int gb_audio_apbridgea_prepare_tx(struct gb_connection *connection,
__u16 i2s_port);
extern int gb_audio_apbridgea_start_tx(struct gb_connection *connection,
__u16 i2s_port, __u64 timestamp);
extern int gb_audio_apbridgea_stop_tx(struct gb_connection *connection,
__u16 i2s_port);
extern int gb_audio_apbridgea_shutdown_tx(struct gb_connection *connection,
__u16 i2s_port);
extern int gb_audio_apbridgea_set_rx_data_size(struct gb_connection *connection,
__u16 i2s_port, __u16 size);
extern int gb_audio_apbridgea_prepare_rx(struct gb_connection *connection,
__u16 i2s_port);
extern int gb_audio_apbridgea_start_rx(struct gb_connection *connection,
__u16 i2s_port);
extern int gb_audio_apbridgea_stop_rx(struct gb_connection *connection,
__u16 i2s_port);
extern int gb_audio_apbridgea_shutdown_rx(struct gb_connection *connection,
__u16 i2s_port);
#endif /* __LINUX_GBAUDIO_CODEC_H */

View File

@ -0,0 +1,228 @@
/*
* Greybus Audio Device Class Protocol helpers
*
* Copyright 2015-2016 Google Inc.
*
* Released under the GPLv2 only.
*/
#include "greybus.h"
#include "greybus_protocols.h"
#include "operation.h"
#include "audio_codec.h"
/* TODO: Split into separate calls */
/*
 * Fetch the module's audio topology blob.  On success, *@topology points
 * to a kzalloc'd buffer that the caller is responsible for freeing.
 */
int gb_audio_gb_get_topology(struct gb_connection *connection,
			     struct gb_audio_topology **topology)
{
	struct gb_audio_get_topology_size_response size_resp;
	struct gb_audio_topology *topo;
	uint16_t topo_size;
	int ret;

	/* First ask the module how large its topology data is. */
	ret = gb_operation_sync(connection, GB_AUDIO_TYPE_GET_TOPOLOGY_SIZE,
				NULL, 0, &size_resp, sizeof(size_resp));
	if (ret)
		return ret;

	topo_size = le16_to_cpu(size_resp.size);
	if (topo_size < sizeof(*topo))
		return -ENODATA;	/* too small to hold even the header */

	topo = kzalloc(topo_size, GFP_KERNEL);
	if (!topo)
		return -ENOMEM;

	ret = gb_operation_sync(connection, GB_AUDIO_TYPE_GET_TOPOLOGY, NULL, 0,
				topo, topo_size);
	if (ret)
		goto err_free_topo;

	*topology = topo;

	return 0;

err_free_topo:
	kfree(topo);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_audio_gb_get_topology);
/* Read the current value of codec control @control_id (element @index). */
int gb_audio_gb_get_control(struct gb_connection *connection,
			    uint8_t control_id, uint8_t index,
			    struct gb_audio_ctl_elem_value *value)
{
	struct gb_audio_get_control_request req = {
		.control_id = control_id,
		.index = index,
	};
	struct gb_audio_get_control_response resp;
	int ret;

	ret = gb_operation_sync(connection, GB_AUDIO_TYPE_GET_CONTROL,
				&req, sizeof(req), &resp, sizeof(resp));
	if (ret)
		return ret;

	memcpy(value, &resp.value, sizeof(*value));

	return 0;
}
EXPORT_SYMBOL_GPL(gb_audio_gb_get_control);
/* Write @value into codec control @control_id (element @index). */
int gb_audio_gb_set_control(struct gb_connection *connection,
			    uint8_t control_id, uint8_t index,
			    struct gb_audio_ctl_elem_value *value)
{
	struct gb_audio_set_control_request req = {
		.control_id = control_id,
		.index = index,
	};

	memcpy(&req.value, value, sizeof(req.value));

	return gb_operation_sync(connection, GB_AUDIO_TYPE_SET_CONTROL,
				 &req, sizeof(req), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_audio_gb_set_control);
/* Enable the topology widget identified by @widget_id. */
int gb_audio_gb_enable_widget(struct gb_connection *connection,
			      uint8_t widget_id)
{
	struct gb_audio_enable_widget_request req = {
		.widget_id = widget_id,
	};

	return gb_operation_sync(connection, GB_AUDIO_TYPE_ENABLE_WIDGET,
				 &req, sizeof(req), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_audio_gb_enable_widget);
/* Disable the topology widget identified by @widget_id. */
int gb_audio_gb_disable_widget(struct gb_connection *connection,
			       uint8_t widget_id)
{
	struct gb_audio_disable_widget_request req = {
		.widget_id = widget_id,
	};

	return gb_operation_sync(connection, GB_AUDIO_TYPE_DISABLE_WIDGET,
				 &req, sizeof(req), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_audio_gb_disable_widget);
/* Query the PCM configuration currently set for @data_cport. */
int gb_audio_gb_get_pcm(struct gb_connection *connection, uint16_t data_cport,
			uint32_t *format, uint32_t *rate, uint8_t *channels,
			uint8_t *sig_bits)
{
	struct gb_audio_get_pcm_request req = {
		.data_cport = cpu_to_le16(data_cport),
	};
	struct gb_audio_get_pcm_response resp;
	int ret;

	ret = gb_operation_sync(connection, GB_AUDIO_TYPE_GET_PCM,
				&req, sizeof(req), &resp, sizeof(resp));
	if (ret)
		return ret;

	/* Convert wire (little-endian) values to host byte order. */
	*format = le32_to_cpu(resp.format);
	*rate = le32_to_cpu(resp.rate);
	*channels = resp.channels;
	*sig_bits = resp.sig_bits;

	return 0;
}
EXPORT_SYMBOL_GPL(gb_audio_gb_get_pcm);
/* Program the PCM format, rate, channel count and sig_bits for @data_cport. */
int gb_audio_gb_set_pcm(struct gb_connection *connection, uint16_t data_cport,
			uint32_t format, uint32_t rate, uint8_t channels,
			uint8_t sig_bits)
{
	struct gb_audio_set_pcm_request req = {
		.data_cport = cpu_to_le16(data_cport),
		.format = cpu_to_le32(format),
		.rate = cpu_to_le32(rate),
		.channels = channels,
		.sig_bits = sig_bits,
	};

	return gb_operation_sync(connection, GB_AUDIO_TYPE_SET_PCM,
				 &req, sizeof(req), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_audio_gb_set_pcm);
/* Set the TX data size for @data_cport. */
int gb_audio_gb_set_tx_data_size(struct gb_connection *connection,
				 uint16_t data_cport, uint16_t size)
{
	struct gb_audio_set_tx_data_size_request req = {
		.data_cport = cpu_to_le16(data_cport),
		.size = cpu_to_le16(size),
	};

	return gb_operation_sync(connection, GB_AUDIO_TYPE_SET_TX_DATA_SIZE,
				 &req, sizeof(req), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_audio_gb_set_tx_data_size);
/* Activate the TX stream on @data_cport. */
int gb_audio_gb_activate_tx(struct gb_connection *connection,
			    uint16_t data_cport)
{
	struct gb_audio_activate_tx_request req = {
		.data_cport = cpu_to_le16(data_cport),
	};

	return gb_operation_sync(connection, GB_AUDIO_TYPE_ACTIVATE_TX,
				 &req, sizeof(req), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_audio_gb_activate_tx);
/* Deactivate the TX stream on @data_cport. */
int gb_audio_gb_deactivate_tx(struct gb_connection *connection,
			      uint16_t data_cport)
{
	struct gb_audio_deactivate_tx_request req = {
		.data_cport = cpu_to_le16(data_cport),
	};

	return gb_operation_sync(connection, GB_AUDIO_TYPE_DEACTIVATE_TX,
				 &req, sizeof(req), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_audio_gb_deactivate_tx);
/* Set the RX data size for @data_cport. */
int gb_audio_gb_set_rx_data_size(struct gb_connection *connection,
				 uint16_t data_cport, uint16_t size)
{
	struct gb_audio_set_rx_data_size_request req = {
		.data_cport = cpu_to_le16(data_cport),
		.size = cpu_to_le16(size),
	};

	return gb_operation_sync(connection, GB_AUDIO_TYPE_SET_RX_DATA_SIZE,
				 &req, sizeof(req), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_audio_gb_set_rx_data_size);
/* Activate the RX stream on @data_cport. */
int gb_audio_gb_activate_rx(struct gb_connection *connection,
			    uint16_t data_cport)
{
	struct gb_audio_activate_rx_request req = {
		.data_cport = cpu_to_le16(data_cport),
	};

	return gb_operation_sync(connection, GB_AUDIO_TYPE_ACTIVATE_RX,
				 &req, sizeof(req), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_audio_gb_activate_rx);
/* Deactivate the RX stream on @data_cport. */
int gb_audio_gb_deactivate_rx(struct gb_connection *connection,
			      uint16_t data_cport)
{
	struct gb_audio_deactivate_rx_request req = {
		.data_cport = cpu_to_le16(data_cport),
	};

	return gb_operation_sync(connection, GB_AUDIO_TYPE_DEACTIVATE_RX,
				 &req, sizeof(req), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_audio_gb_deactivate_rx);
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("greybus:audio-gb");
MODULE_DESCRIPTION("Greybus Audio Device Class Protocol library");
MODULE_AUTHOR("Mark Greer <mgreer@animalcreek.com>");

View File

@ -0,0 +1,184 @@
/*
* Greybus operations
*
* Copyright 2015-2016 Google Inc.
*
* Released under the GPLv2 only.
*/
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rwlock.h>
#include <linux/idr.h>
#include "audio_manager.h"
#include "audio_manager_private.h"
static struct kset *manager_kset;
static LIST_HEAD(modules_list);
static DECLARE_RWSEM(modules_rwsem);
static DEFINE_IDA(module_id);
/* helpers */
static struct gb_audio_manager_module *gb_audio_manager_get_locked(int id)
{
struct gb_audio_manager_module *module;
if (id < 0)
return NULL;
list_for_each_entry(module, &modules_list, list) {
if (module->id == id)
return module;
}
return NULL;
}
/* public API */
/*
 * Register a new audio module described by @desc.
 *
 * Returns the id of the newly created module, or a negative errno on
 * failure (id allocation or kobject creation).
 */
int gb_audio_manager_add(struct gb_audio_manager_module_descriptor *desc)
{
	struct gb_audio_manager_module *module;
	int id;
	int err;

	/*
	 * ida_simple_get() returns a negative errno on failure; the old
	 * code ignored that and would later call ida_simple_remove() with
	 * a negative id.
	 */
	id = ida_simple_get(&module_id, 0, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	err = gb_audio_manager_module_create(&module, manager_kset,
					     id, desc);
	if (err) {
		ida_simple_remove(&module_id, id);
		return err;
	}

	/* Add it to the list */
	down_write(&modules_rwsem);
	list_add_tail(&module->list, &modules_list);
	up_write(&modules_rwsem);

	return module->id;
}
EXPORT_SYMBOL_GPL(gb_audio_manager_add);
/*
 * Unregister the module with @id: unlink it, drop its kobject reference
 * and release the id.  Returns -EINVAL if @id is unknown.
 */
int gb_audio_manager_remove(int id)
{
	struct gb_audio_manager_module *module;
	int ret = 0;

	down_write(&modules_rwsem);

	module = gb_audio_manager_get_locked(id);
	if (module) {
		list_del(&module->list);
		kobject_put(&module->kobj);
	} else {
		ret = -EINVAL;
	}

	up_write(&modules_rwsem);

	/* Only give the id back once the module is off the list. */
	if (!ret)
		ida_simple_remove(&module_id, id);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_audio_manager_remove);
/* Unregister every module; warn if any node survived the sweep. */
void gb_audio_manager_remove_all(void)
{
	struct gb_audio_manager_module *module, *next;
	int is_empty;

	down_write(&modules_rwsem);

	list_for_each_entry_safe(module, next, &modules_list, list) {
		list_del(&module->list);
		kobject_put(&module->kobj);
		ida_simple_remove(&module_id, module->id);
	}

	is_empty = list_empty(&modules_list);

	up_write(&modules_rwsem);

	if (!is_empty)
		pr_warn("Not all nodes were deleted\n");
}
EXPORT_SYMBOL_GPL(gb_audio_manager_remove_all);
/*
 * Look up the module with @id and take a kobject reference on it.
 * Returns NULL if no such module exists; otherwise the caller must drop
 * the reference with gb_audio_manager_put_module().
 */
struct gb_audio_manager_module *gb_audio_manager_get_module(int id)
{
	struct gb_audio_manager_module *module;

	down_read(&modules_rwsem);
	module = gb_audio_manager_get_locked(id);
	/*
	 * gb_audio_manager_get_locked() returns NULL for unknown ids; the
	 * old code unconditionally dereferenced it via kobject_get().
	 */
	if (module)
		kobject_get(&module->kobj);
	up_read(&modules_rwsem);
	return module;
}
EXPORT_SYMBOL_GPL(gb_audio_manager_get_module);
/* Drop the reference taken by gb_audio_manager_get_module(). */
void gb_audio_manager_put_module(struct gb_audio_manager_module *module)
{
	kobject_put(&module->kobj);
}
EXPORT_SYMBOL_GPL(gb_audio_manager_put_module);
/*
 * Dump one module's descriptor to the kernel log.
 * Returns 0 on success, -EINVAL if no module has @id.
 */
int gb_audio_manager_dump_module(int id)
{
	struct gb_audio_manager_module *module;

	down_read(&modules_rwsem);
	module = gb_audio_manager_get_locked(id);
	up_read(&modules_rwsem);

	/*
	 * NOTE(review): 'module' is used after the rwsem is released and
	 * without a kobject reference, so a concurrent remove could free
	 * it first — confirm callers serialize against removal.
	 */
	if (!module)
		return -EINVAL;

	gb_audio_manager_module_dump(module);

	return 0;
}
EXPORT_SYMBOL_GPL(gb_audio_manager_dump_module);
/* Dump every registered module under the read lock, then log the count. */
void gb_audio_manager_dump_all(void)
{
	struct gb_audio_manager_module *m;
	int num_modules = 0;

	down_read(&modules_rwsem);

	list_for_each_entry(m, &modules_list, list) {
		gb_audio_manager_module_dump(m);
		num_modules++;
	}

	up_read(&modules_rwsem);

	pr_info("Number of connected modules: %d\n", num_modules);
}
EXPORT_SYMBOL_GPL(gb_audio_manager_dump_all);
/*
* module init/deinit
*/
/* Module init: create the manager kset under /sys/kernel. */
static int __init manager_init(void)
{
	manager_kset = kset_create_and_add(GB_AUDIO_MANAGER_NAME, NULL,
					   kernel_kobj);
	if (!manager_kset)
		return -ENOMEM;

#ifdef GB_AUDIO_MANAGER_SYSFS
	/* Optional extra sysfs entry points, compiled in on demand. */
	gb_audio_manager_sysfs_init(&manager_kset->kobj);
#endif

	return 0;
}
/*
 * Module exit: remove all modules first (they release their ids), then
 * drop the kset and finally destroy the id allocator.
 */
static void __exit manager_exit(void)
{
	gb_audio_manager_remove_all();
	kset_unregister(manager_kset);
	ida_destroy(&module_id);
}
module_init(manager_init);
module_exit(manager_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Svetlin Ankov <ankov_svetlin@projectara.com>");

View File

@ -0,0 +1,83 @@
/*
* Greybus operations
*
* Copyright 2015-2016 Google Inc.
*
* Released under the GPLv2 only.
*/
#ifndef _GB_AUDIO_MANAGER_H_
#define _GB_AUDIO_MANAGER_H_
#include <linux/kobject.h>
#include <linux/list.h>
#define GB_AUDIO_MANAGER_NAME "gb_audio_manager"
#define GB_AUDIO_MANAGER_MODULE_NAME_LEN 64
#define GB_AUDIO_MANAGER_MODULE_NAME_LEN_SSCANF "63"
struct gb_audio_manager_module_descriptor {
char name[GB_AUDIO_MANAGER_MODULE_NAME_LEN];
int slot;
int vid;
int pid;
int cport;
unsigned int ip_devices;
unsigned int op_devices;
};
struct gb_audio_manager_module {
struct kobject kobj;
struct list_head list;
int id;
struct gb_audio_manager_module_descriptor desc;
};
/*
* Creates a new gb_audio_manager_module_descriptor, using the specified
* descriptor.
*
* Returns a negative result on error, or the id of the newly created module.
*
*/
int gb_audio_manager_add(struct gb_audio_manager_module_descriptor *desc);
/*
* Removes a connected gb_audio_manager_module_descriptor for the specified ID.
*
* Returns zero on success, or a negative value on error.
*/
int gb_audio_manager_remove(int id);
/*
 * Removes all connected gb_audio_modules.
 *
 * Returns nothing; failure to delete individual nodes is logged.
 */
void gb_audio_manager_remove_all(void);
/*
* Retrieves a gb_audio_manager_module_descriptor for the specified id.
* Returns the gb_audio_manager_module_descriptor structure,
* or NULL if there is no module with the specified ID.
*/
struct gb_audio_manager_module *gb_audio_manager_get_module(int id);
/*
* Decreases the refcount of the module, obtained by the get function.
* Modules are removed via gb_audio_manager_remove
*/
void gb_audio_manager_put_module(struct gb_audio_manager_module *module);
/*
* Dumps the module for the specified id
* Return 0 on success
*/
int gb_audio_manager_dump_module(int id);
/*
* Dumps all connected modules
*/
void gb_audio_manager_dump_all(void);
#endif /* _GB_AUDIO_MANAGER_H_ */

View File

@ -0,0 +1,258 @@
/*
* Greybus operations
*
* Copyright 2015-2016 Google Inc.
*
* Released under the GPLv2 only.
*/
#include <linux/slab.h>
#include "audio_manager.h"
#include "audio_manager_private.h"
#define to_gb_audio_module_attr(x) \
container_of(x, struct gb_audio_manager_module_attribute, attr)
#define to_gb_audio_module(x) \
container_of(x, struct gb_audio_manager_module, kobj)
/*
 * Typed sysfs attribute: routes the generic kobject show/store callbacks
 * to handlers that receive the owning gb_audio_manager_module directly.
 */
struct gb_audio_manager_module_attribute {
	struct attribute attr;
	ssize_t (*show)(struct gb_audio_manager_module *module,
			struct gb_audio_manager_module_attribute *attr,
			char *buf);
	ssize_t (*store)(struct gb_audio_manager_module *module,
			 struct gb_audio_manager_module_attribute *attr,
			 const char *buf, size_t count);
};
/* Generic sysfs show: dispatch to the typed per-attribute handler. */
static ssize_t gb_audio_module_attr_show(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct gb_audio_manager_module_attribute *mattr =
		to_gb_audio_module_attr(attr);
	struct gb_audio_manager_module *module = to_gb_audio_module(kobj);

	if (!mattr->show)
		return -EIO;

	return mattr->show(module, mattr, buf);
}
/* Generic sysfs store: dispatch to the typed per-attribute handler. */
static ssize_t gb_audio_module_attr_store(struct kobject *kobj,
					  struct attribute *attr,
					  const char *buf, size_t len)
{
	struct gb_audio_manager_module_attribute *mattr =
		to_gb_audio_module_attr(attr);
	struct gb_audio_manager_module *module = to_gb_audio_module(kobj);

	if (!mattr->store)
		return -EIO;

	return mattr->store(module, mattr, buf, len);
}
static const struct sysfs_ops gb_audio_module_sysfs_ops = {
.show = gb_audio_module_attr_show,
.store = gb_audio_module_attr_store,
};
/* kobject release callback: frees the module when its last ref drops. */
static void gb_audio_module_release(struct kobject *kobj)
{
	struct gb_audio_manager_module *module = to_gb_audio_module(kobj);

	pr_info("Destroying audio module #%d\n", module->id);
	/* TODO -> delete from list */
	kfree(module);
}
/* sysfs 'name': module name string (no trailing newline). */
static ssize_t gb_audio_module_name_show(
	struct gb_audio_manager_module *module,
	struct gb_audio_manager_module_attribute *attr, char *buf)
{
	return sprintf(buf, "%s", module->desc.name);
}

/* NOTE(review): mode 0664 grants write permission although no ->store
 * callback is set (writes get -EIO from gb_audio_module_attr_store) —
 * confirm whether 0444 was intended.  Applies to all attributes below.
 */
static struct gb_audio_manager_module_attribute gb_audio_module_name_attribute =
	__ATTR(name, 0664, gb_audio_module_name_show, NULL);

/* sysfs 'slot': slot number, decimal. */
static ssize_t gb_audio_module_slot_show(
	struct gb_audio_manager_module *module,
	struct gb_audio_manager_module_attribute *attr, char *buf)
{
	return sprintf(buf, "%d", module->desc.slot);
}

static struct gb_audio_manager_module_attribute gb_audio_module_slot_attribute =
	__ATTR(slot, 0664, gb_audio_module_slot_show, NULL);

/* sysfs 'vid': vendor id, decimal. */
static ssize_t gb_audio_module_vid_show(
	struct gb_audio_manager_module *module,
	struct gb_audio_manager_module_attribute *attr, char *buf)
{
	return sprintf(buf, "%d", module->desc.vid);
}

static struct gb_audio_manager_module_attribute gb_audio_module_vid_attribute =
	__ATTR(vid, 0664, gb_audio_module_vid_show, NULL);

/* sysfs 'pid': product id, decimal. */
static ssize_t gb_audio_module_pid_show(
	struct gb_audio_manager_module *module,
	struct gb_audio_manager_module_attribute *attr, char *buf)
{
	return sprintf(buf, "%d", module->desc.pid);
}

static struct gb_audio_manager_module_attribute gb_audio_module_pid_attribute =
	__ATTR(pid, 0664, gb_audio_module_pid_show, NULL);

/* sysfs 'cport': management CPort, decimal. */
static ssize_t gb_audio_module_cport_show(
	struct gb_audio_manager_module *module,
	struct gb_audio_manager_module_attribute *attr, char *buf)
{
	return sprintf(buf, "%d", module->desc.cport);
}

static struct gb_audio_manager_module_attribute
			gb_audio_module_cport_attribute =
	__ATTR(cport, 0664, gb_audio_module_cport_show, NULL);

/* sysfs 'ip_devices': input-device bitmask, hex. */
static ssize_t gb_audio_module_ip_devices_show(
	struct gb_audio_manager_module *module,
	struct gb_audio_manager_module_attribute *attr, char *buf)
{
	return sprintf(buf, "0x%X", module->desc.ip_devices);
}

static struct gb_audio_manager_module_attribute
			gb_audio_module_ip_devices_attribute =
	__ATTR(ip_devices, 0664, gb_audio_module_ip_devices_show, NULL);

/* sysfs 'op_devices': output-device bitmask, hex. */
static ssize_t gb_audio_module_op_devices_show(
	struct gb_audio_manager_module *module,
	struct gb_audio_manager_module_attribute *attr, char *buf)
{
	return sprintf(buf, "0x%X", module->desc.op_devices);
}

static struct gb_audio_manager_module_attribute
			gb_audio_module_op_devices_attribute =
	__ATTR(op_devices, 0664, gb_audio_module_op_devices_show, NULL);
static struct attribute *gb_audio_module_default_attrs[] = {
&gb_audio_module_name_attribute.attr,
&gb_audio_module_slot_attribute.attr,
&gb_audio_module_vid_attribute.attr,
&gb_audio_module_pid_attribute.attr,
&gb_audio_module_cport_attribute.attr,
&gb_audio_module_ip_devices_attribute.attr,
&gb_audio_module_op_devices_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
/*
 * ktype for audio-module kobjects: attribute show/store dispatch goes
 * through gb_audio_module_sysfs_ops; gb_audio_module_release() runs when
 * the last reference is dropped (kobject_put).
 */
static struct kobj_type gb_audio_module_type = {
	.sysfs_ops = &gb_audio_module_sysfs_ops,
	.release = gb_audio_module_release,
	.default_attrs = gb_audio_module_default_attrs,
};
/*
 * Emit a KOBJ_ADD uevent for @module, carrying the descriptor fields as
 * environment variables (NAME, SLOT, VID, PID, CPORT, I/P DEVICES,
 * O/P DEVICES) so userspace can discover the new audio module.
 */
static void send_add_uevent(struct gb_audio_manager_module *module)
{
	char name_string[128];
	char slot_string[64];
	char vid_string[64];
	char pid_string[64];
	char cport_string[64];
	char ip_devices_string[64];
	char op_devices_string[64];

	char *envp[] = {
		name_string,
		slot_string,
		vid_string,
		pid_string,
		cport_string,
		ip_devices_string,
		op_devices_string,
		NULL
	};

	/*
	 * Use sizeof(buf) instead of repeating the magic lengths so the
	 * bounds stay correct if a buffer size ever changes.
	 */
	snprintf(name_string, sizeof(name_string), "NAME=%s",
		 module->desc.name);
	snprintf(slot_string, sizeof(slot_string), "SLOT=%d",
		 module->desc.slot);
	snprintf(vid_string, sizeof(vid_string), "VID=%d", module->desc.vid);
	snprintf(pid_string, sizeof(pid_string), "PID=%d", module->desc.pid);
	snprintf(cport_string, sizeof(cport_string), "CPORT=%d",
		 module->desc.cport);
	snprintf(ip_devices_string, sizeof(ip_devices_string),
		 "I/P DEVICES=0x%X", module->desc.ip_devices);
	snprintf(op_devices_string, sizeof(op_devices_string),
		 "O/P DEVICES=0x%X", module->desc.op_devices);

	kobject_uevent_env(&module->kobj, KOBJ_ADD, envp);
}
/*
 * gb_audio_manager_module_create() - allocate and register one audio module
 * @module:       out parameter; receives the new module on success
 * @manager_kset: kset the module's kobject is placed under
 * @id:           numeric id, also used as the sysfs directory name
 * @desc:         descriptor copied into the module (caller keeps ownership)
 *
 * Returns 0 on success or a negative errno. On kobject_init_and_add()
 * failure the half-initialized kobject is dropped with kobject_put(), which
 * invokes the ktype's release callback.
 */
int gb_audio_manager_module_create(
	struct gb_audio_manager_module **module,
	struct kset *manager_kset,
	int id, struct gb_audio_manager_module_descriptor *desc)
{
	int err;
	struct gb_audio_manager_module *m;

	/* NOTE(review): GFP_ATOMIC — no atomic-context caller is visible
	 * from here; GFP_KERNEL is presumably sufficient. TODO confirm. */
	m = kzalloc(sizeof(*m), GFP_ATOMIC);
	if (!m)
		return -ENOMEM;

	/* Initialize the node */
	INIT_LIST_HEAD(&m->list);

	/* Set the module id */
	m->id = id;

	/* Copy the provided descriptor */
	memcpy(&m->desc, desc, sizeof(*desc));

	/* set the kset */
	m->kobj.kset = manager_kset;

	/*
	 * Initialize and add the kobject to the kernel. All the default files
	 * will be created here. As we have already specified a kset for this
	 * kobject, we don't have to set a parent for the kobject, the kobject
	 * will be placed beneath that kset automatically.
	 */
	err = kobject_init_and_add(&m->kobj, &gb_audio_module_type, NULL, "%d",
				   id);
	if (err) {
		pr_err("failed initializing kobject for audio module #%d\n",
		       id);
		kobject_put(&m->kobj);
		return err;
	}

	/*
	 * Notify the object was created
	 */
	send_add_uevent(m);

	*module = m;

	pr_info("Created audio module #%d\n", id);

	return 0;
}
void gb_audio_manager_module_dump(struct gb_audio_manager_module *module)
{
pr_info("audio module #%d name=%s slot=%d vid=%d pid=%d cport=%d i/p devices=0x%X o/p devices=0x%X\n",
module->id,
module->desc.name,
module->desc.slot,
module->desc.vid,
module->desc.pid,
module->desc.cport,
module->desc.ip_devices,
module->desc.op_devices);
}

View File

@ -0,0 +1,28 @@
/*
* Greybus operations
*
* Copyright 2015-2016 Google Inc.
*
* Released under the GPLv2 only.
*/
#ifndef _GB_AUDIO_MANAGER_PRIVATE_H_
#define _GB_AUDIO_MANAGER_PRIVATE_H_
#include <linux/kobject.h>
#include "audio_manager.h"
/* Allocate a module object under @manager_kset, create its sysfs files and
 * emit a KOBJ_ADD uevent. Returns 0 or a negative errno. */
int gb_audio_manager_module_create(
	struct gb_audio_manager_module **module,
	struct kset *manager_kset,
	int id, struct gb_audio_manager_module_descriptor *desc);
/* module destroyed via kobject_put */

/* Log the module's descriptor fields at info level (debug aid). */
void gb_audio_manager_module_dump(struct gb_audio_manager_module *module);

/* sysfs control */
/* Create the manager's "add"/"remove"/"dump" control files under @kobj. */
void gb_audio_manager_sysfs_init(struct kobject *kobj);

#endif /* _GB_AUDIO_MANAGER_PRIVATE_H_ */

View File

@ -0,0 +1,102 @@
/*
* Greybus operations
*
* Copyright 2015-2016 Google Inc.
*
* Released under the GPLv2 only.
*/
#include <linux/string.h>
#include <linux/sysfs.h>
#include "audio_manager.h"
#include "audio_manager_private.h"
/*
 * "add" control file: parse a module descriptor from @buf and register it
 * via gb_audio_manager_add(). All seven fields are required:
 *
 *   name=<s> slot=<d> vid=<d> pid=<d> cport=<d>
 *   i/p devices=0x<X> o/p devices=0x<X>
 *
 * Returns @count on success, -EINVAL on parse or add failure.
 */
static ssize_t manager_sysfs_add_store(
	struct kobject *kobj, struct kobj_attribute *attr,
	const char *buf, size_t count)
{
	struct gb_audio_manager_module_descriptor desc = { {0} };

	/*
	 * Fix: the two string-literal halves of the format used to
	 * concatenate to "...devices=0x%Xo/p devices=..." — the missing
	 * space made the literal "o/p" fail to match right after the hex
	 * conversion, so a well-formed line could never yield 7 fields.
	 */
	int num = sscanf(buf,
			"name=%" GB_AUDIO_MANAGER_MODULE_NAME_LEN_SSCANF "s "
			"slot=%d vid=%d pid=%d cport=%d i/p devices=0x%X "
			"o/p devices=0x%X",
			desc.name, &desc.slot, &desc.vid, &desc.pid,
			&desc.cport, &desc.ip_devices, &desc.op_devices);

	if (num != 7)
		return -EINVAL;

	num = gb_audio_manager_add(&desc);
	if (num < 0)
		return -EINVAL;

	return count;
}
static struct kobj_attribute manager_add_attribute =
	__ATTR(add, 0664, NULL, manager_sysfs_add_store);
/*
 * "remove" control file: expects a single decimal module id; forwards it
 * to gb_audio_manager_remove(). Returns @count on success, -EINVAL on
 * parse failure, or the error from gb_audio_manager_remove().
 */
static ssize_t manager_sysfs_remove_store(
	struct kobject *kobj, struct kobj_attribute *attr,
	const char *buf, size_t count)
{
	int ret, id;

	if (sscanf(buf, "%d", &id) != 1)
		return -EINVAL;

	ret = gb_audio_manager_remove(id);
	if (ret)
		return ret;

	return count;
}
static struct kobj_attribute manager_remove_attribute =
	__ATTR(remove, 0664, NULL, manager_sysfs_remove_store);
/*
 * "dump" control file: accepts either a decimal module id (dump that
 * module) or the literal "all" (dump every module). Returns @count on
 * success, a negative errno otherwise.
 */
static ssize_t manager_sysfs_dump_store(
	struct kobject *kobj, struct kobj_attribute *attr,
	const char *buf, size_t count)
{
	int ret, id;

	if (sscanf(buf, "%d", &id) == 1) {
		ret = gb_audio_manager_dump_module(id);
		if (ret)
			return ret;
	} else if (!strncmp("all", buf, 3)) {
		gb_audio_manager_dump_all();
	} else {
		return -EINVAL;
	}

	return count;
}
static struct kobj_attribute manager_dump_attribute =
	__ATTR(dump, 0664, NULL, manager_sysfs_dump_store);
/* Create one sysfs file under @kobj; failure is logged, not propagated. */
static void manager_sysfs_init_attribute(
	struct kobject *kobj, struct kobj_attribute *kattr)
{
	int ret = sysfs_create_file(kobj, &kattr->attr);

	if (ret)
		pr_warn("creating the sysfs entry for %s failed: %d\n",
			kattr->attr.name, ret);
}
void gb_audio_manager_sysfs_init(struct kobject *kobj)
{
manager_sysfs_init_attribute(kobj, &manager_add_attribute);
manager_sysfs_init_attribute(kobj, &manager_remove_attribute);
manager_sysfs_init_attribute(kobj, &manager_dump_attribute);
}

View File

@ -0,0 +1,482 @@
/*
* Greybus audio driver
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <sound/soc.h>
#include <sound/pcm_params.h>
#include "audio_codec.h"
#include "audio_apbridgea.h"
#include "audio_manager.h"
/*
* gb_snd management functions
*/
/*
 * Handle a jack event from the module.
 *
 * On GB_AUDIO_JACK_EVENT_REMOVAL any latched button state is released
 * first, then the jack is reported as gone. For other events the reported
 * attribute is masked against the registered jack mask and forwarded to
 * ASoC via snd_soc_jack_report().
 *
 * Returns 0 on success, -EINVAL if no jack is registered or the attribute
 * does not intersect the jack mask.
 */
static int gbaudio_request_jack(struct gbaudio_module_info *module,
				  struct gb_audio_jack_event_request *req)
{
	int report;
	struct snd_jack *jack = module->headset_jack.jack;
	struct snd_jack *btn_jack = module->button_jack.jack;

	if (!jack) {
		dev_err_ratelimited(module->dev,
				    "Invalid jack event received:type: %u, event: %u\n",
				    req->jack_attribute, req->event);
		return -EINVAL;
	}

	dev_warn_ratelimited(module->dev,
			     "Jack Event received: type: %u, event: %u\n",
			     req->jack_attribute, req->event);

	if (req->event == GB_AUDIO_JACK_EVENT_REMOVAL) {
		module->jack_type = 0;
		/* release pressed buttons before reporting jack removal */
		if (btn_jack && module->button_status) {
			snd_soc_jack_report(&module->button_jack, 0,
					    module->button_mask);
			module->button_status = 0;
		}
		snd_soc_jack_report(&module->headset_jack, 0,
				    module->jack_mask);
		return 0;
	}

	report = req->jack_attribute & module->jack_mask;
	if (!report) {
		dev_err_ratelimited(module->dev,
				    "Invalid jack event received:type: %u, event: %u\n",
				    req->jack_attribute, req->event);
		return -EINVAL;
	}

	/* a second insertion event just overwrites the recorded type */
	if (module->jack_type)
		dev_warn_ratelimited(module->dev,
				     "Modifying jack from %d to %d\n",
				     module->jack_type, report);

	module->jack_type = report;
	snd_soc_jack_report(&module->headset_jack, report, module->jack_mask);

	return 0;
}
/*
 * Handle a button press/release event from the module.
 *
 * Button ids 1..4 map to SND_JACK_BTN_0..SND_JACK_BTN_3 (masked by the
 * registered button mask). The resulting pressed-set is cached in
 * module->button_status and forwarded via snd_soc_jack_report().
 *
 * Returns 0 on success, -EINVAL if no button jack is registered, no jack
 * is inserted, or the id maps to no enabled button.
 */
static int gbaudio_request_button(struct gbaudio_module_info *module,
				  struct gb_audio_button_event_request *req)
{
	int soc_button_id, report;
	struct snd_jack *btn_jack = module->button_jack.jack;

	if (!btn_jack) {
		dev_err_ratelimited(module->dev,
				    "Invalid button event received:type: %u, event: %u\n",
				    req->button_id, req->event);
		return -EINVAL;
	}

	dev_warn_ratelimited(module->dev,
			     "Button Event received: id: %u, event: %u\n",
			     req->button_id, req->event);

	/* currently supports 4 buttons only */
	if (!module->jack_type) {
		dev_err_ratelimited(module->dev,
				    "Jack not present. Bogus event!!\n");
		return -EINVAL;
	}

	report = module->button_status & module->button_mask;
	soc_button_id = 0;

	switch (req->button_id) {
	case 1:
		soc_button_id = SND_JACK_BTN_0 & module->button_mask;
		break;
	case 2:
		soc_button_id = SND_JACK_BTN_1 & module->button_mask;
		break;
	case 3:
		soc_button_id = SND_JACK_BTN_2 & module->button_mask;
		break;
	case 4:
		soc_button_id = SND_JACK_BTN_3 & module->button_mask;
		break;
	}

	if (!soc_button_id) {
		dev_err_ratelimited(module->dev,
				    "Invalid button request received\n");
		return -EINVAL;
	}

	/* set the bit on press, clear it on release */
	if (req->event == GB_AUDIO_BUTTON_EVENT_PRESS)
		report = report | soc_button_id;
	else
		report = report & ~soc_button_id;

	module->button_status = report;

	snd_soc_jack_report(&module->button_jack, report, module->button_mask);

	return 0;
}
/*
 * Handle a streaming event from the module. Currently only logged; no
 * state is updated. Always returns 0.
 *
 * NOTE(review): req->data_cport/event are printed raw with %u — if these
 * are little-endian wire fields they should be byte-swapped first; TODO
 * confirm against the protocol definitions.
 */
static int gbaudio_request_stream(struct gbaudio_module_info *module,
				  struct gb_audio_streaming_event_request *req)
{
	dev_warn(module->dev, "Audio Event received: cport: %u, event: %u\n",
		 req->data_cport, req->event);

	return 0;
}
/*
 * Unsolicited-request handler for the audio management connection:
 * dispatch incoming greybus operations by request type to the stream,
 * jack or button handlers. Returns the handler's result, or -EINVAL for
 * unknown request types.
 */
static int gbaudio_codec_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gbaudio_module_info *module =
		greybus_get_drvdata(connection->bundle);
	struct gb_operation_msg_hdr *header = op->request->header;
	struct gb_audio_streaming_event_request *stream_req;
	struct gb_audio_jack_event_request *jack_req;
	struct gb_audio_button_event_request *button_req;
	int ret;

	switch (header->type) {
	case GB_AUDIO_TYPE_STREAMING_EVENT:
		stream_req = op->request->payload;
		ret = gbaudio_request_stream(module, stream_req);
		break;
	case GB_AUDIO_TYPE_JACK_EVENT:
		jack_req = op->request->payload;
		ret = gbaudio_request_jack(module, jack_req);
		break;
	case GB_AUDIO_TYPE_BUTTON_EVENT:
		button_req = op->request->payload;
		ret = gbaudio_request_button(module, button_req);
		break;
	default:
		dev_err_ratelimited(&connection->bundle->dev,
				    "Invalid Audio Event received\n");
		return -EINVAL;
	}

	return ret;
}
/*
 * Create the (single) management connection for the bundle and record it
 * in @gbmodule. Refuses a second management cport. Returns 0 or a
 * negative errno from gb_connection_create().
 */
static int gb_audio_add_mgmt_connection(struct gbaudio_module_info *gbmodule,
		struct greybus_descriptor_cport *cport_desc,
		struct gb_bundle *bundle)
{
	struct gb_connection *connection;

	/* Management Cport */
	if (gbmodule->mgmt_connection) {
		dev_err(&bundle->dev,
			"Can't have multiple Management connections\n");
		return -ENODEV;
	}

	connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
					  gbaudio_codec_request_handler);
	if (IS_ERR(connection))
		return PTR_ERR(connection);

	greybus_set_drvdata(bundle, gbmodule);
	gbmodule->mgmt_connection = connection;

	return 0;
}
/*
 * Create one offloaded (CSD) data connection for @cport_desc, wrap it in
 * a gbaudio_data_connection and queue it on the module's data list.
 * The DAI is devm-allocated, so it is freed with the bundle device (or
 * explicitly via devm_kfree on the error paths). Returns 0 or a negative
 * errno.
 */
static int gb_audio_add_data_connection(struct gbaudio_module_info *gbmodule,
		struct greybus_descriptor_cport *cport_desc,
		struct gb_bundle *bundle)
{
	struct gb_connection *connection;
	struct gbaudio_data_connection *dai;

	dai = devm_kzalloc(gbmodule->dev, sizeof(*dai), GFP_KERNEL);
	if (!dai) {
		dev_err(gbmodule->dev, "DAI Malloc failure\n");
		return -ENOMEM;
	}

	connection = gb_connection_create_offloaded(bundle,
					le16_to_cpu(cport_desc->id),
					GB_CONNECTION_FLAG_CSD);
	if (IS_ERR(connection)) {
		devm_kfree(gbmodule->dev, dai);
		return PTR_ERR(connection);
	}

	greybus_set_drvdata(bundle, gbmodule);
	/* id is assigned later; remember the interface-side cport */
	dai->id = 0;
	dai->data_cport = connection->intf_cport_id;
	dai->connection = connection;
	list_add(&dai->list, &gbmodule->data_list);

	return 0;
}
/*
* This is the basic hook get things initialized and registered w/ gb
*/
/*
 * Bundle probe: create one management connection plus one data connection
 * per remaining cport, enable the management connection, fetch and parse
 * the audio topology, enable the data connections, register the module
 * with the codec layer and announce it to the audio manager.
 *
 * Error handling unwinds in reverse order via the labelled exits below.
 *
 * NOTE(review): gb_pm_runtime_put_autosuspend() is called on success with
 * no matching get visible in this function — presumably the greybus core
 * holds a runtime-PM reference across probe; TODO confirm.
 */
static int gb_audio_probe(struct gb_bundle *bundle,
			  const struct greybus_bundle_id *id)
{
	struct device *dev = &bundle->dev;
	struct gbaudio_module_info *gbmodule;
	struct greybus_descriptor_cport *cport_desc;
	struct gb_audio_manager_module_descriptor desc;
	struct gbaudio_data_connection *dai, *_dai;
	int ret, i;
	struct gb_audio_topology *topology;

	/* There should be at least one Management and one Data cport */
	if (bundle->num_cports < 2)
		return -ENODEV;

	/*
	 * There can be only one Management connection and any number of data
	 * connections.
	 */
	gbmodule = devm_kzalloc(dev, sizeof(*gbmodule), GFP_KERNEL);
	if (!gbmodule)
		return -ENOMEM;

	gbmodule->num_data_connections = bundle->num_cports - 1;
	INIT_LIST_HEAD(&gbmodule->data_list);
	INIT_LIST_HEAD(&gbmodule->widget_list);
	INIT_LIST_HEAD(&gbmodule->ctl_list);
	INIT_LIST_HEAD(&gbmodule->widget_ctl_list);
	gbmodule->dev = dev;
	snprintf(gbmodule->name, NAME_SIZE, "%s.%s", dev->driver->name,
		 dev_name(dev));
	greybus_set_drvdata(bundle, gbmodule);

	/* Create all connections */
	for (i = 0; i < bundle->num_cports; i++) {
		cport_desc = &bundle->cport_desc[i];

		switch (cport_desc->protocol_id) {
		case GREYBUS_PROTOCOL_AUDIO_MGMT:
			ret = gb_audio_add_mgmt_connection(gbmodule, cport_desc,
							   bundle);
			if (ret)
				goto destroy_connections;
			break;
		case GREYBUS_PROTOCOL_AUDIO_DATA:
			ret = gb_audio_add_data_connection(gbmodule, cport_desc,
							   bundle);
			if (ret)
				goto destroy_connections;
			break;
		default:
			dev_err(dev, "Unsupported protocol: 0x%02x\n",
				cport_desc->protocol_id);
			ret = -ENODEV;
			goto destroy_connections;
		}
	}

	/* There must be a management cport */
	if (!gbmodule->mgmt_connection) {
		ret = -EINVAL;
		dev_err(dev, "Missing management connection\n");
		goto destroy_connections;
	}

	/* Initialize management connection */
	ret = gb_connection_enable(gbmodule->mgmt_connection);
	if (ret) {
		dev_err(dev, "%d: Error while enabling mgmt connection\n", ret);
		goto destroy_connections;
	}
	gbmodule->dev_id = gbmodule->mgmt_connection->intf->interface_id;

	/*
	 * FIXME: malloc for topology happens via audio_gb driver
	 * should be done within codec driver itself
	 */
	ret = gb_audio_gb_get_topology(gbmodule->mgmt_connection, &topology);
	if (ret) {
		dev_err(dev, "%d:Error while fetching topology\n", ret);
		goto disable_connection;
	}

	/* process topology data */
	ret = gbaudio_tplg_parse_data(gbmodule, topology);
	if (ret) {
		dev_err(dev, "%d:Error while parsing topology data\n",
			ret);
		goto free_topology;
	}
	gbmodule->topology = topology;

	/* Initialize data connections */
	list_for_each_entry(dai, &gbmodule->data_list, list) {
		ret = gb_connection_enable(dai->connection);
		if (ret) {
			dev_err(dev,
				"%d:Error while enabling %d:data connection\n",
				ret, dai->data_cport);
			goto disable_data_connection;
		}
	}

	/* register module with gbcodec */
	ret = gbaudio_register_module(gbmodule);
	if (ret)
		goto disable_data_connection;

	/* inform above layer for uevent */
	dev_dbg(dev, "Inform set_event:%d to above layer\n", 1);
	/* prepare for the audio manager */
	strlcpy(desc.name, gbmodule->name, GB_AUDIO_MANAGER_MODULE_NAME_LEN);
	desc.slot = 1; /* todo */
	desc.vid = 2; /* todo */
	desc.pid = 3; /* todo */
	desc.cport = gbmodule->dev_id;
	desc.op_devices = gbmodule->op_devices;
	desc.ip_devices = gbmodule->ip_devices;
	gbmodule->manager_id = gb_audio_manager_add(&desc);

	dev_dbg(dev, "Add GB Audio device:%s\n", gbmodule->name);

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;

disable_data_connection:
	list_for_each_entry_safe(dai, _dai, &gbmodule->data_list, list)
		gb_connection_disable(dai->connection);
	gbaudio_tplg_release(gbmodule);
	gbmodule->topology = NULL;

free_topology:
	kfree(topology);

disable_connection:
	gb_connection_disable(gbmodule->mgmt_connection);

destroy_connections:
	list_for_each_entry_safe(dai, _dai, &gbmodule->data_list, list) {
		gb_connection_destroy(dai->connection);
		list_del(&dai->list);
		devm_kfree(dev, dai);
	}

	if (gbmodule->mgmt_connection)
		gb_connection_destroy(gbmodule->mgmt_connection);

	devm_kfree(dev, gbmodule);

	return ret;
}
/*
 * Bundle disconnect: reverse of gb_audio_probe(). Unregister from the
 * codec layer and audio manager first, then release the parsed topology,
 * then tear down management and data connections.
 */
static void gb_audio_disconnect(struct gb_bundle *bundle)
{
	struct gbaudio_module_info *gbmodule = greybus_get_drvdata(bundle);
	struct gbaudio_data_connection *dai, *_dai;

	gb_pm_runtime_get_sync(bundle);

	/* cleanup module related resources first */
	gbaudio_unregister_module(gbmodule);

	/* inform uevent to above layers */
	gb_audio_manager_remove(gbmodule->manager_id);

	gbaudio_tplg_release(gbmodule);
	kfree(gbmodule->topology);
	gbmodule->topology = NULL;
	gb_connection_disable(gbmodule->mgmt_connection);
	list_for_each_entry_safe(dai, _dai, &gbmodule->data_list, list) {
		gb_connection_disable(dai->connection);
		gb_connection_destroy(dai->connection);
		list_del(&dai->list);
		devm_kfree(gbmodule->dev, dai);
	}
	gb_connection_destroy(gbmodule->mgmt_connection);
	gbmodule->mgmt_connection = NULL;

	devm_kfree(&bundle->dev, gbmodule);
}
static const struct greybus_bundle_id gb_audio_id_table[] = {
{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_AUDIO) },
{ }
};
MODULE_DEVICE_TABLE(greybus, gb_audio_id_table);
/*
 * Runtime-PM callbacks.
 *
 * Fix: this block was guarded by CONFIG_PM_RUNTIME, a symbol removed in
 * v3.19 (folded into CONFIG_PM), so the functions were never built while
 * SET_RUNTIME_PM_OPS() below still referenced them whenever CONFIG_PM was
 * enabled — a guaranteed build break. Guard with CONFIG_PM, matching the
 * condition under which SET_RUNTIME_PM_OPS() expands its arguments.
 */
#ifdef CONFIG_PM
/* Runtime suspend: quiesce all data connections, then management. */
static int gb_audio_suspend(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);
	struct gbaudio_module_info *gbmodule = greybus_get_drvdata(bundle);
	struct gbaudio_data_connection *dai;

	list_for_each_entry(dai, &gbmodule->data_list, list)
		gb_connection_disable(dai->connection);

	gb_connection_disable(gbmodule->mgmt_connection);

	return 0;
}

/* Runtime resume: re-enable management first, then each data connection. */
static int gb_audio_resume(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);
	struct gbaudio_module_info *gbmodule = greybus_get_drvdata(bundle);
	struct gbaudio_data_connection *dai;
	int ret;

	ret = gb_connection_enable(gbmodule->mgmt_connection);
	if (ret) {
		dev_err(dev, "%d:Error while enabling mgmt connection\n", ret);
		return ret;
	}

	list_for_each_entry(dai, &gbmodule->data_list, list) {
		ret = gb_connection_enable(dai->connection);
		if (ret) {
			dev_err(dev,
				"%d:Error while enabling %d:data connection\n",
				ret, dai->data_cport);
			return ret;
		}
	}

	return 0;
}
#endif
static const struct dev_pm_ops gb_audio_pm_ops = {
SET_RUNTIME_PM_OPS(gb_audio_suspend, gb_audio_resume, NULL)
};
static struct greybus_driver gb_audio_driver = {
.name = "gb-audio",
.probe = gb_audio_probe,
.disconnect = gb_audio_disconnect,
.id_table = gb_audio_id_table,
.driver.pm = &gb_audio_pm_ops,
};
module_greybus_driver(gb_audio_driver);
MODULE_DESCRIPTION("Greybus Audio module driver");
MODULE_AUTHOR("Vaibhav Agarwal <vaibhav.agarwal@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:gbaudio-module");

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,429 @@
/*
* Greybus Component Authentication Protocol (CAP) Driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include "greybus.h"
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include "greybus_authentication.h"
#include "firmware.h"
#include "greybus.h"
#define CAP_TIMEOUT_MS 1000
/*
* Number of minor devices this driver supports.
* There will be exactly one required per Interface.
*/
#define NUM_MINORS U8_MAX
struct gb_cap {
struct device *parent;
struct gb_connection *connection;
struct kref kref;
struct list_head node;
bool disabled; /* connection getting disabled */
struct mutex mutex;
struct cdev cdev;
struct device *class_device;
dev_t dev_num;
};
static struct class *cap_class;
static dev_t cap_dev_num;
static DEFINE_IDA(cap_minors_map);
static LIST_HEAD(cap_list);
static DEFINE_MUTEX(list_mutex);
/* kref release callback: the last reference is gone, free the gb_cap. */
static void cap_kref_release(struct kref *kref)
{
	kfree(container_of(kref, struct gb_cap, kref));
}
/*
* All users of cap take a reference (from within list_mutex lock), before
* they get a pointer to play with. And the structure will be freed only after
* the last user has put the reference to it.
*/
/* Drop one reference; the final put frees @cap via cap_kref_release(). */
static void put_cap(struct gb_cap *cap)
{
	kref_put(&cap->kref, cap_kref_release);
}
/* Caller must call put_cap() after using struct gb_cap */
/*
 * Look up the gb_cap owning @cdev on the global list, taking a reference
 * under list_mutex. Returns NULL if the device is no longer listed (e.g.
 * torn down concurrently).
 */
static struct gb_cap *get_cap(struct cdev *cdev)
{
	struct gb_cap *cap;

	mutex_lock(&list_mutex);
	list_for_each_entry(cap, &cap_list, node) {
		if (&cap->cdev == cdev) {
			kref_get(&cap->kref);
			goto unlock;
		}
	}
	cap = NULL;
unlock:
	mutex_unlock(&list_mutex);

	return cap;
}
/*
 * Fetch the Endpoint UID from the module via a synchronous greybus
 * operation, copying it into @euid (sizeof(response.uid) bytes).
 * Returns 0 or the gb_operation_sync() error.
 */
static int cap_get_endpoint_uid(struct gb_cap *cap, u8 *euid)
{
	struct gb_connection *connection = cap->connection;
	struct gb_cap_get_endpoint_uid_response response;
	int ret;

	ret = gb_operation_sync(connection, GB_CAP_TYPE_GET_ENDPOINT_UID, NULL,
				0, &response, sizeof(response));
	if (ret) {
		dev_err(cap->parent, "failed to get endpoint uid (%d)\n", ret);
		return ret;
	}

	memcpy(euid, response.uid, sizeof(response.uid));

	return 0;
}
/*
 * Request an IMS certificate of the given @class/@id from the module.
 * The response is allowed to be shorter than the maximum payload
 * (GB_OPERATION_FLAG_SHORT_RESPONSE); the actual certificate length is
 * returned through @size, the protocol status through @result.
 * Returns 0 or a negative errno.
 */
static int cap_get_ims_certificate(struct gb_cap *cap, u32 class, u32 id,
				   u8 *certificate, u32 *size, u8 *result)
{
	struct gb_connection *connection = cap->connection;
	struct gb_cap_get_ims_certificate_request *request;
	struct gb_cap_get_ims_certificate_response *response;
	size_t max_size = gb_operation_get_payload_size_max(connection);
	struct gb_operation *op;
	int ret;

	op = gb_operation_create_flags(connection,
				       GB_CAP_TYPE_GET_IMS_CERTIFICATE,
				       sizeof(*request), max_size,
				       GB_OPERATION_FLAG_SHORT_RESPONSE,
				       GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	request = op->request->payload;
	request->certificate_class = cpu_to_le32(class);
	request->certificate_id = cpu_to_le32(id);

	ret = gb_operation_request_send_sync(op);
	if (ret) {
		dev_err(cap->parent, "failed to get certificate (%d)\n", ret);
		goto done;
	}

	response = op->response->payload;
	*result = response->result_code;
	/* certificate occupies whatever follows the fixed response header */
	*size = op->response->payload_size - sizeof(*response);
	memcpy(certificate, response->certificate, *size);

done:
	gb_operation_put(op);

	return ret;
}
/*
 * Run an authentication exchange with the module: send @auth_type, @uid
 * and @challenge, receive the status (@result), the fixed-size response
 * blob (@auth_response) and the variable-length signature (@signature,
 * length returned via @signature_size). Returns 0 or a negative errno.
 */
static int cap_authenticate(struct gb_cap *cap, u32 auth_type, u8 *uid,
			    u8 *challenge, u8 *result, u8 *auth_response,
			    u32 *signature_size, u8 *signature)
{
	struct gb_connection *connection = cap->connection;
	struct gb_cap_authenticate_request *request;
	struct gb_cap_authenticate_response *response;
	size_t max_size = gb_operation_get_payload_size_max(connection);
	struct gb_operation *op;
	int ret;

	op = gb_operation_create_flags(connection, GB_CAP_TYPE_AUTHENTICATE,
				       sizeof(*request), max_size,
				       GB_OPERATION_FLAG_SHORT_RESPONSE,
				       GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	request = op->request->payload;
	request->auth_type = cpu_to_le32(auth_type);
	memcpy(request->uid, uid, sizeof(request->uid));
	memcpy(request->challenge, challenge, sizeof(request->challenge));

	ret = gb_operation_request_send_sync(op);
	if (ret) {
		dev_err(cap->parent, "failed to authenticate (%d)\n", ret);
		goto done;
	}

	response = op->response->payload;
	*result = response->result_code;
	/* signature is the variable tail beyond the fixed response header */
	*signature_size = op->response->payload_size - sizeof(*response);
	memcpy(auth_response, response->response, sizeof(response->response));
	memcpy(signature, response->signature, *signature_size);

done:
	gb_operation_put(op);

	return ret;
}
/* Char device fops */
/*
 * Char-device open: take a reference on the owning gb_cap and stash it in
 * file->private_data. The reference pins the structure until release.
 */
static int cap_open(struct inode *inode, struct file *file)
{
	struct gb_cap *cap = get_cap(inode->i_cdev);

	/* cap structure can't get freed until file descriptor is closed */
	if (!cap)
		return -ENODEV;

	file->private_data = cap;
	return 0;
}
/* Char-device release: drop the reference taken in cap_open(). */
static int cap_release(struct inode *inode, struct file *file)
{
	put_cap(file->private_data);
	return 0;
}
/*
 * Dispatch one CAP ioctl. For the certificate/authenticate commands the
 * user buffer is duplicated with memdup_user(), filled in place by the
 * greybus operation and copied back. Returns 0, -ENOTTY for unknown
 * commands, or a negative errno.
 *
 * NOTE(review): for CAP_IOC_GET_ENDPOINT_UID only the uid member of the
 * stack struct is written by cap_get_endpoint_uid() before the whole
 * struct is copied to userspace — if the struct has padding or other
 * fields, that is a kernel-stack infoleak; TODO confirm the layout of
 * struct cap_ioc_get_endpoint_uid.
 */
static int cap_ioctl(struct gb_cap *cap, unsigned int cmd,
		     void __user *buf)
{
	struct cap_ioc_get_endpoint_uid endpoint_uid;
	struct cap_ioc_get_ims_certificate *ims_cert;
	struct cap_ioc_authenticate *authenticate;
	size_t size;
	int ret;

	switch (cmd) {
	case CAP_IOC_GET_ENDPOINT_UID:
		ret = cap_get_endpoint_uid(cap, endpoint_uid.uid);
		if (ret)
			return ret;

		if (copy_to_user(buf, &endpoint_uid, sizeof(endpoint_uid)))
			return -EFAULT;

		return 0;
	case CAP_IOC_GET_IMS_CERTIFICATE:
		size = sizeof(*ims_cert);
		ims_cert = memdup_user(buf, size);
		if (IS_ERR(ims_cert))
			return PTR_ERR(ims_cert);

		ret = cap_get_ims_certificate(cap, ims_cert->certificate_class,
					      ims_cert->certificate_id,
					      ims_cert->certificate,
					      &ims_cert->cert_size,
					      &ims_cert->result_code);
		if (!ret && copy_to_user(buf, ims_cert, size))
			ret = -EFAULT;
		kfree(ims_cert);

		return ret;
	case CAP_IOC_AUTHENTICATE:
		size = sizeof(*authenticate);
		authenticate = memdup_user(buf, size);
		if (IS_ERR(authenticate))
			return PTR_ERR(authenticate);

		ret = cap_authenticate(cap, authenticate->auth_type,
				       authenticate->uid,
				       authenticate->challenge,
				       &authenticate->result_code,
				       authenticate->response,
				       &authenticate->signature_size,
				       authenticate->signature);
		if (!ret && copy_to_user(buf, authenticate, size))
			ret = -EFAULT;
		kfree(authenticate);

		return ret;
	default:
		return -ENOTTY;
	}
}
/*
 * unlocked_ioctl entry point: serialize all CAP ioctls on cap->mutex,
 * reject them once ->disabled is set (connection tearing down), and
 * bracket the work with a runtime-PM reference on the bundle.
 * Returns the ioctl result, or -ENODEV while disabled.
 */
static long cap_ioctl_unlocked(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	struct gb_cap *cap = file->private_data;
	struct gb_bundle *bundle = cap->connection->bundle;
	int ret = -ENODEV;

	/*
	 * Serialize ioctls.
	 *
	 * We don't want the user to do multiple authentication operations in
	 * parallel.
	 *
	 * This is also used to protect ->disabled, which is used to check if
	 * the connection is getting disconnected, so that we don't start any
	 * new operations.
	 */
	mutex_lock(&cap->mutex);
	if (!cap->disabled) {
		ret = gb_pm_runtime_get_sync(bundle);
		if (!ret) {
			ret = cap_ioctl(cap, cmd, (void __user *)arg);
			gb_pm_runtime_put_autosuspend(bundle);
		}
	}
	mutex_unlock(&cap->mutex);

	return ret;
}
static const struct file_operations cap_fops = {
.owner = THIS_MODULE,
.open = cap_open,
.release = cap_release,
.unlocked_ioctl = cap_ioctl_unlocked,
};
/*
 * Set up CAP support for @connection: allocate the gb_cap, link it on the
 * global list, enable the connection, then expose a char device
 * ("gb-authenticate-<minor>") for userspace. A NULL @connection is a
 * no-op (bundle has no authentication cport). Returns 0 or a negative
 * errno; on failure everything is unwound in reverse order.
 */
int gb_cap_connection_init(struct gb_connection *connection)
{
	struct gb_cap *cap;
	int ret, minor;

	if (!connection)
		return 0;

	cap = kzalloc(sizeof(*cap), GFP_KERNEL);
	if (!cap)
		return -ENOMEM;

	cap->parent = &connection->bundle->dev;
	cap->connection = connection;
	mutex_init(&cap->mutex);
	gb_connection_set_data(connection, cap);
	kref_init(&cap->kref);

	mutex_lock(&list_mutex);
	list_add(&cap->node, &cap_list);
	mutex_unlock(&list_mutex);

	ret = gb_connection_enable(connection);
	if (ret)
		goto err_list_del;

	minor = ida_simple_get(&cap_minors_map, 0, NUM_MINORS, GFP_KERNEL);
	if (minor < 0) {
		ret = minor;
		goto err_connection_disable;
	}

	/* Add a char device to allow userspace to interact with cap */
	cap->dev_num = MKDEV(MAJOR(cap_dev_num), minor);
	cdev_init(&cap->cdev, &cap_fops);

	ret = cdev_add(&cap->cdev, cap->dev_num, 1);
	if (ret)
		goto err_remove_ida;

	/* Add a soft link to the previously added char-dev within the bundle */
	cap->class_device = device_create(cap_class, cap->parent, cap->dev_num,
					  NULL, "gb-authenticate-%d", minor);
	if (IS_ERR(cap->class_device)) {
		ret = PTR_ERR(cap->class_device);
		goto err_del_cdev;
	}

	return 0;

err_del_cdev:
	cdev_del(&cap->cdev);
err_remove_ida:
	ida_simple_remove(&cap_minors_map, minor);
err_connection_disable:
	gb_connection_disable(connection);
err_list_del:
	mutex_lock(&list_mutex);
	list_del(&cap->node);
	mutex_unlock(&list_mutex);

	put_cap(cap);

	return ret;
}
/*
 * Tear down CAP support: remove the char device first so no new opens
 * arrive, mark the cap disabled so in-flight fds can't start new ioctls,
 * disable the connection, unlist the cap and drop the init reference.
 * A NULL @connection is a no-op.
 */
void gb_cap_connection_exit(struct gb_connection *connection)
{
	struct gb_cap *cap;

	if (!connection)
		return;

	cap = gb_connection_get_data(connection);

	device_destroy(cap_class, cap->dev_num);
	cdev_del(&cap->cdev);
	ida_simple_remove(&cap_minors_map, MINOR(cap->dev_num));

	/*
	 * Disallow any new ioctl operations on the char device and wait for
	 * existing ones to finish.
	 */
	mutex_lock(&cap->mutex);
	cap->disabled = true;
	mutex_unlock(&cap->mutex);

	/* All pending greybus operations should have finished by now */
	gb_connection_disable(cap->connection);

	/* Disallow new users to get access to the cap structure */
	mutex_lock(&list_mutex);
	list_del(&cap->node);
	mutex_unlock(&list_mutex);

	/*
	 * All current users of cap would have taken a reference to it by
	 * now, we can drop our reference and wait the last user will get
	 * cap freed.
	 */
	put_cap(cap);
}
/*
 * Module-level CAP setup: create the "gb_authenticate" device class and
 * reserve a char-dev major with NUM_MINORS minors. Returns 0 or a
 * negative errno (the class is destroyed on region-allocation failure).
 */
int cap_init(void)
{
	int ret;

	cap_class = class_create(THIS_MODULE, "gb_authenticate");
	if (IS_ERR(cap_class))
		return PTR_ERR(cap_class);

	ret = alloc_chrdev_region(&cap_dev_num, 0, NUM_MINORS,
				  "gb_authenticate");
	if (ret) {
		class_destroy(cap_class);
		return ret;
	}

	return 0;
}
/* Module-level CAP teardown: reverse of cap_init(), plus minor-ida drain. */
void cap_exit(void)
{
	unregister_chrdev_region(cap_dev_num, NUM_MINORS);
	class_destroy(cap_class);
	ida_destroy(&cap_minors_map);
}

View File

@ -0,0 +1,524 @@
/*
* BOOTROM Greybus driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/firmware.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include "greybus.h"
#include "firmware.h"
/* Timeout, in jiffies, within which the next request must be received */
#define NEXT_REQ_TIMEOUT_MS 1000
/*
* FIXME: Reduce this timeout once svc core handles parallel processing of
* events from the SVC, which are handled sequentially today.
*/
#define MODE_SWITCH_TIMEOUT_MS 10000
enum next_request_type {
NEXT_REQ_FIRMWARE_SIZE,
NEXT_REQ_GET_FIRMWARE,
NEXT_REQ_READY_TO_BOOT,
NEXT_REQ_MODE_SWITCH,
};
struct gb_bootrom {
struct gb_connection *connection;
const struct firmware *fw;
u8 protocol_major;
u8 protocol_minor;
enum next_request_type next_request;
struct delayed_work dwork;
struct mutex mutex; /* Protects bootrom->fw */
};
/*
 * Release the cached firmware blob, if any, and clear the pointer.
 * Caller holds bootrom->mutex (it protects bootrom->fw).
 */
static void free_firmware(struct gb_bootrom *bootrom)
{
	if (bootrom->fw) {
		release_firmware(bootrom->fw);
		bootrom->fw = NULL;
	}
}
/*
 * Watchdog work: fires when the module fails to send the expected next
 * bootrom request in time. Logs which request was awaited and drops any
 * cached firmware so a later retry starts clean.
 */
static void gb_bootrom_timedout(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct gb_bootrom *bootrom = container_of(dwork, struct gb_bootrom, dwork);
	struct device *dev = &bootrom->connection->bundle->dev;
	const char *reason;

	switch (bootrom->next_request) {
	case NEXT_REQ_FIRMWARE_SIZE:
		reason = "Firmware Size Request";
		break;
	case NEXT_REQ_GET_FIRMWARE:
		reason = "Get Firmware Request";
		break;
	case NEXT_REQ_READY_TO_BOOT:
		reason = "Ready to Boot Request";
		break;
	case NEXT_REQ_MODE_SWITCH:
		reason = "Interface Mode Switch";
		break;
	default:
		reason = NULL;
		dev_err(dev, "Invalid next-request: %u", bootrom->next_request);
		break;
	}

	dev_err(dev, "Timed out waiting for %s from the Module\n", reason);

	mutex_lock(&bootrom->mutex);
	free_firmware(bootrom);
	mutex_unlock(&bootrom->mutex);

	/* TODO: Power-off Module ? */
}
/* Record which request we expect next and arm the watchdog for @timeout ms. */
static void gb_bootrom_set_timeout(struct gb_bootrom *bootrom,
			enum next_request_type next, unsigned long timeout)
{
	bootrom->next_request = next;
	schedule_delayed_work(&bootrom->dwork, msecs_to_jiffies(timeout));
}
/* Disarm the watchdog; waits for a running gb_bootrom_timedout() to finish. */
static void gb_bootrom_cancel_timeout(struct gb_bootrom *bootrom)
{
	cancel_delayed_work_sync(&bootrom->dwork);
}
/*
* The es2 chip doesn't have VID/PID programmed into the hardware and we need to
* hack that up to distinguish different modules and their firmware blobs.
*
* This fetches VID/PID (over bootrom protocol) for es2 chip only, when VID/PID
* already sent during hotplug are 0.
*
* Otherwise, we keep intf->vendor_id/product_id same as what's passed
* during hotplug.
*/
/*
 * ES2 quirk: fetch VID/PID over the bootrom protocol and patch them into
 * the interface, but only for interfaces flagged with
 * GB_INTERFACE_QUIRK_NO_GMP_IDS. Failures are logged and ignored —
 * the interface simply keeps its hotplug-provided ids.
 */
static void bootrom_es2_fixup_vid_pid(struct gb_bootrom *bootrom)
{
	struct gb_bootrom_get_vid_pid_response response;
	struct gb_connection *connection = bootrom->connection;
	struct gb_interface *intf = connection->bundle->intf;
	int ret;

	if (!(intf->quirks & GB_INTERFACE_QUIRK_NO_GMP_IDS))
		return;

	ret = gb_operation_sync(connection, GB_BOOTROM_TYPE_GET_VID_PID,
				NULL, 0, &response, sizeof(response));
	if (ret) {
		dev_err(&connection->bundle->dev,
			"Bootrom get vid/pid operation failed (%d)\n", ret);
		return;
	}

	/*
	 * NOTE: This is hacked, so that the same values of VID/PID can be used
	 * by next firmware level as well. The uevent for bootrom will still
	 * have VID/PID as 0, though after this point the sysfs files will start
	 * showing the updated values. But yeah, that's a bit racy as the same
	 * sysfs files would be showing 0 before this point.
	 */
	intf->vendor_id = le32_to_cpu(response.vendor_id);
	intf->product_id = le32_to_cpu(response.product_id);

	dev_dbg(&connection->bundle->dev, "Bootrom got vid (0x%x)/pid (0x%x)\n",
		intf->vendor_id, intf->product_id);
}
/* This returns path of the firmware blob on the disk */
/*
 * Build the stage-2 firmware file name from the interface's
 * DDBL1/vendor/product ids and load it via request_firmware() into
 * bootrom->fw (any previously cached blob is released first).
 * Caller holds bootrom->mutex. Returns 0 or a negative errno; only
 * @stage == 2 is supported.
 */
static int find_firmware(struct gb_bootrom *bootrom, u8 stage)
{
	struct gb_connection *connection = bootrom->connection;
	struct gb_interface *intf = connection->bundle->intf;
	char firmware_name[49];
	int rc;

	/* Already have a firmware, free it */
	free_firmware(bootrom);

	/* Bootrom protocol is only supported for loading Stage 2 firmware */
	if (stage != 2) {
		dev_err(&connection->bundle->dev, "Invalid boot stage: %u\n",
			stage);
		return -EINVAL;
	}

	/*
	 * Create firmware name
	 *
	 * XXX Name it properly..
	 */
	snprintf(firmware_name, sizeof(firmware_name),
		 FW_NAME_PREFIX "%08x_%08x_%08x_%08x_s2l.tftf",
		 intf->ddbl1_manufacturer_id, intf->ddbl1_product_id,
		 intf->vendor_id, intf->product_id);

	// FIXME:
	// Turn to dev_dbg later after everyone has valid bootloaders with good
	// ids, but leave this as dev_info for now to make it easier to track
	// down "empty" vid/pid modules.
	dev_info(&connection->bundle->dev, "Firmware file '%s' requested\n",
		 firmware_name);

	rc = request_firmware(&bootrom->fw, firmware_name,
			      &connection->bundle->dev);
	if (rc) {
		dev_err(&connection->bundle->dev,
			"failed to find %s firmware (%d)\n", firmware_name, rc);
	}

	return rc;
}
/*
 * Handle the module's "firmware size" request: locate the stage-2 blob,
 * cache it in bootrom->fw and reply with its size. On success the
 * watchdog is re-armed to expect the first "get firmware" request.
 * Returns 0 or a negative errno (sent back as the operation status).
 */
static int gb_bootrom_firmware_size_request(struct gb_operation *op)
{
	struct gb_bootrom *bootrom = gb_connection_get_data(op->connection);
	struct gb_bootrom_firmware_size_request *size_request = op->request->payload;
	struct gb_bootrom_firmware_size_response *size_response;
	struct device *dev = &op->connection->bundle->dev;
	int ret;

	/* Disable timeouts */
	gb_bootrom_cancel_timeout(bootrom);

	if (op->request->payload_size != sizeof(*size_request)) {
		dev_err(dev, "%s: illegal size of firmware size request (%zu != %zu)\n",
			__func__, op->request->payload_size,
			sizeof(*size_request));
		ret = -EINVAL;
		goto queue_work;
	}

	mutex_lock(&bootrom->mutex);

	ret = find_firmware(bootrom, size_request->stage);
	if (ret)
		goto unlock;

	if (!gb_operation_response_alloc(op, sizeof(*size_response),
					 GFP_KERNEL)) {
		dev_err(dev, "%s: error allocating response\n", __func__);
		free_firmware(bootrom);
		ret = -ENOMEM;
		goto unlock;
	}

	size_response = op->response->payload;
	size_response->size = cpu_to_le32(bootrom->fw->size);

	/*
	 * Fix: the debug print used to pass the __le32 response field to %d
	 * — a format/type mismatch that also shows a byte-swapped value on
	 * big-endian hosts. Print the native size_t with %zu instead.
	 */
	dev_dbg(dev, "%s: firmware size %zu bytes\n",
		__func__, bootrom->fw->size);

unlock:
	mutex_unlock(&bootrom->mutex);

queue_work:
	if (!ret) {
		/* Refresh timeout */
		gb_bootrom_set_timeout(bootrom, NEXT_REQ_GET_FIRMWARE,
				       NEXT_REQ_TIMEOUT_MS);
	}

	return ret;
}
/*
 * Handle a GET_FIRMWARE request: copy the [offset, offset + size) chunk of
 * the firmware blob cached by find_firmware() into the response.
 */
static int gb_bootrom_get_firmware(struct gb_operation *op)
{
	struct gb_bootrom *bootrom = gb_connection_get_data(op->connection);
	const struct firmware *fw;
	struct gb_bootrom_get_firmware_request *firmware_request;
	struct gb_bootrom_get_firmware_response *firmware_response;
	struct device *dev = &op->connection->bundle->dev;
	unsigned int offset, size;
	enum next_request_type next_request;
	int ret = 0;

	/* Disable timeouts while servicing the request */
	gb_bootrom_cancel_timeout(bootrom);

	if (op->request->payload_size != sizeof(*firmware_request)) {
		dev_err(dev, "%s: Illegal size of get firmware request (%zu %zu)\n",
			__func__, op->request->payload_size,
			sizeof(*firmware_request));
		ret = -EINVAL;
		goto queue_work;
	}

	/* bootrom->fw is shared with find_firmware(); serialise access */
	mutex_lock(&bootrom->mutex);

	fw = bootrom->fw;
	if (!fw) {
		dev_err(dev, "%s: firmware not available\n", __func__);
		ret = -EINVAL;
		goto unlock;
	}

	firmware_request = op->request->payload;
	offset = le32_to_cpu(firmware_request->offset);
	size = le32_to_cpu(firmware_request->size);

	/* Overflow-safe bounds check against the cached blob */
	if (offset >= fw->size || size > fw->size - offset) {
		dev_warn(dev, "bad firmware request (offs = %u, size = %u)\n",
			 offset, size);
		ret = -EINVAL;
		goto unlock;
	}

	if (!gb_operation_response_alloc(op, sizeof(*firmware_response) + size,
					 GFP_KERNEL)) {
		dev_err(dev, "%s: error allocating response\n", __func__);
		ret = -ENOMEM;
		goto unlock;
	}

	firmware_response = op->response->payload;
	memcpy(firmware_response->data, fw->data + offset, size);

	dev_dbg(dev, "responding with firmware (offs = %u, size = %u)\n", offset,
		size);

unlock:
	mutex_unlock(&bootrom->mutex);

queue_work:
	/*
	 * Refresh timeout: expect READY_TO_BOOT next once the final chunk has
	 * been requested, another GET_FIRMWARE otherwise. Note the !ret guard:
	 * fw, offset and size are only valid on the success path.
	 */
	if (!ret && (offset + size == fw->size))
		next_request = NEXT_REQ_READY_TO_BOOT;
	else
		next_request = NEXT_REQ_GET_FIRMWARE;

	gb_bootrom_set_timeout(bootrom, next_request, NEXT_REQ_TIMEOUT_MS);

	return ret;
}
/*
 * Handle a READY_TO_BOOT request: the module reports the validation status
 * of the firmware it received before switching over to it.
 */
static int gb_bootrom_ready_to_boot(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_bootrom *bootrom = gb_connection_get_data(connection);
	struct gb_bootrom_ready_to_boot_request *rtb_request;
	struct device *dev = &connection->bundle->dev;
	u8 status;
	int ret = 0;

	/* Disable timeouts while servicing the request */
	gb_bootrom_cancel_timeout(bootrom);

	if (op->request->payload_size != sizeof(*rtb_request)) {
		dev_err(dev, "%s: Illegal size of ready to boot request (%zu %zu)\n",
			__func__, op->request->payload_size,
			sizeof(*rtb_request));
		ret = -EINVAL;
		goto queue_work;
	}

	rtb_request = op->request->payload;
	status = rtb_request->status;

	/* Return error if the blob was invalid */
	if (status == GB_BOOTROM_BOOT_STATUS_INVALID) {
		ret = -EINVAL;
		goto queue_work;
	}

	/*
	 * XXX Should we return error for insecure firmware?
	 */
	dev_dbg(dev, "ready to boot: 0x%x, 0\n", status);

queue_work:
	/*
	 * Refresh timeout, the Interface shall load the new personality and
	 * send a new hotplug request, which shall get rid of the bootrom
	 * connection. As that can take some time, increase the timeout a bit.
	 */
	gb_bootrom_set_timeout(bootrom, NEXT_REQ_MODE_SWITCH,
			       MODE_SWITCH_TIMEOUT_MS);

	return ret;
}
/* Dispatch an incoming bootrom request to its handler by operation type */
static int gb_bootrom_request_handler(struct gb_operation *op)
{
	switch (op->type) {
	case GB_BOOTROM_TYPE_FIRMWARE_SIZE:
		return gb_bootrom_firmware_size_request(op);
	case GB_BOOTROM_TYPE_GET_FIRMWARE:
		return gb_bootrom_get_firmware(op);
	case GB_BOOTROM_TYPE_READY_TO_BOOT:
		return gb_bootrom_ready_to_boot(op);
	default:
		dev_err(&op->connection->bundle->dev,
			"unsupported request: %u\n", op->type);
		return -EINVAL;
	}
}
/*
 * Negotiate the bootrom protocol version with the module. The module may
 * answer with a lower version than requested, but a higher major version
 * is incompatible.
 */
static int gb_bootrom_get_version(struct gb_bootrom *bootrom)
{
	struct gb_bundle *bundle = bootrom->connection->bundle;
	struct gb_bootrom_version_request request;
	struct gb_bootrom_version_response response;
	int ret;

	request.major = GB_BOOTROM_VERSION_MAJOR;
	request.minor = GB_BOOTROM_VERSION_MINOR;

	ret = gb_operation_sync(bootrom->connection,
				GB_BOOTROM_TYPE_VERSION,
				&request, sizeof(request), &response,
				sizeof(response));
	if (ret) {
		dev_err(&bundle->dev,
			"failed to get protocol version: %d\n",
			ret);
		return ret;
	}

	/* Reject modules speaking a newer major protocol than we do */
	if (response.major > request.major) {
		dev_err(&bundle->dev,
			"unsupported major protocol version (%u > %u)\n",
			response.major, request.major);
		return -ENOTSUPP;
	}

	bootrom->protocol_major = response.major;
	bootrom->protocol_minor = response.minor;

	dev_dbg(&bundle->dev, "%s - %u.%u\n", __func__, response.major,
		response.minor);

	return 0;
}
/*
 * Bind to a bootrom bundle: set up the connection, negotiate the protocol
 * version and signal AP_READY so the module starts the download sequence.
 */
static int gb_bootrom_probe(struct gb_bundle *bundle,
			    const struct greybus_bundle_id *id)
{
	struct greybus_descriptor_cport *cport_desc;
	struct gb_connection *connection;
	struct gb_bootrom *bootrom;
	int ret;

	/* A bootrom bundle carries exactly one cport, of bootrom protocol */
	if (bundle->num_cports != 1)
		return -ENODEV;

	cport_desc = &bundle->cport_desc[0];
	if (cport_desc->protocol_id != GREYBUS_PROTOCOL_BOOTROM)
		return -ENODEV;

	bootrom = kzalloc(sizeof(*bootrom), GFP_KERNEL);
	if (!bootrom)
		return -ENOMEM;

	connection = gb_connection_create(bundle,
					  le16_to_cpu(cport_desc->id),
					  gb_bootrom_request_handler);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto err_free_bootrom;
	}

	gb_connection_set_data(connection, bootrom);

	bootrom->connection = connection;

	mutex_init(&bootrom->mutex);
	INIT_DELAYED_WORK(&bootrom->dwork, gb_bootrom_timedout);
	greybus_set_drvdata(bundle, bootrom);

	/* Enable tx only first: version negotiation precedes rx handling */
	ret = gb_connection_enable_tx(connection);
	if (ret)
		goto err_connection_destroy;

	ret = gb_bootrom_get_version(bootrom);
	if (ret)
		goto err_connection_disable;

	bootrom_es2_fixup_vid_pid(bootrom);

	/* Now enable incoming requests as well */
	ret = gb_connection_enable(connection);
	if (ret)
		goto err_connection_disable;

	/* Refresh timeout */
	gb_bootrom_set_timeout(bootrom, NEXT_REQ_FIRMWARE_SIZE,
			       NEXT_REQ_TIMEOUT_MS);

	/* Tell bootrom we're ready. */
	ret = gb_operation_sync(connection, GB_BOOTROM_TYPE_AP_READY, NULL, 0,
				NULL, 0);
	if (ret) {
		dev_err(&connection->bundle->dev,
			"failed to send AP READY: %d\n", ret);
		goto err_cancel_timeout;
	}

	dev_dbg(&bundle->dev, "AP_READY sent\n");

	return 0;

err_cancel_timeout:
	gb_bootrom_cancel_timeout(bootrom);
err_connection_disable:
	gb_connection_disable(connection);
err_connection_destroy:
	gb_connection_destroy(connection);
err_free_bootrom:
	kfree(bootrom);

	return ret;
}
/* Unbind: tear down connection, pending timeout work and cached firmware */
static void gb_bootrom_disconnect(struct gb_bundle *bundle)
{
	struct gb_bootrom *bootrom = greybus_get_drvdata(bundle);

	dev_dbg(&bundle->dev, "%s\n", __func__);

	gb_connection_disable(bootrom->connection);

	/* Disable timeouts */
	gb_bootrom_cancel_timeout(bootrom);

	/*
	 * Release firmware:
	 *
	 * As the connection and the delayed work are already disabled, we don't
	 * need to lock access to bootrom->fw here.
	 */
	free_firmware(bootrom);

	gb_connection_destroy(bootrom->connection);

	kfree(bootrom);
}
/* Match any bundle of the bootrom class */
static const struct greybus_bundle_id gb_bootrom_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_BOOTROM) },
	{ }
};

static struct greybus_driver gb_bootrom_driver = {
	.name		= "bootrom",
	.probe		= gb_bootrom_probe,
	.disconnect	= gb_bootrom_disconnect,
	.id_table	= gb_bootrom_id_table,
};
module_greybus_driver(gb_bootrom_driver);

MODULE_LICENSE("GPL v2");

View File

@ -0,0 +1,254 @@
/*
* Greybus bundles
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include "greybus.h"
#include "greybus_trace.h"
static ssize_t bundle_class_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gb_bundle *bundle = to_gb_bundle(dev);
return sprintf(buf, "0x%02x\n", bundle->class);
}
static DEVICE_ATTR_RO(bundle_class);
static ssize_t bundle_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gb_bundle *bundle = to_gb_bundle(dev);
return sprintf(buf, "%u\n", bundle->id);
}
static DEVICE_ATTR_RO(bundle_id);
/* sysfs "state" read: the free-form state string, or empty when unset */
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	if (!bundle->state)
		return sprintf(buf, "\n");

	return sprintf(buf, "%s\n", bundle->state);
}
/*
 * sysfs "state" write: replace the state string and notify pollers.
 * The old string is freed before allocating the new one, so on OOM the
 * state is left unset (NULL) rather than keeping the stale value.
 */
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t size)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	kfree(bundle->state);
	bundle->state = kstrdup(buf, GFP_KERNEL);
	if (!bundle->state)
		return -ENOMEM;

	/* Tell userspace that the file contents changed */
	sysfs_notify(&bundle->dev.kobj, NULL, "state");

	return size;
}
static DEVICE_ATTR_RW(state);
/* Default sysfs attributes present on every bundle device */
static struct attribute *bundle_attrs[] = {
	&dev_attr_bundle_class.attr,
	&dev_attr_bundle_id.attr,
	&dev_attr_state.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bundle);
/* Look up a bundle by id on an interface; NULL when not present */
static struct gb_bundle *gb_bundle_find(struct gb_interface *intf,
					u8 bundle_id)
{
	struct gb_bundle *iter;

	list_for_each_entry(iter, &intf->bundles, links) {
		if (iter->id == bundle_id)
			return iter;
	}

	return NULL;
}
/* Device release callback: frees the bundle when the last ref is dropped */
static void gb_bundle_release(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	trace_gb_bundle_release(bundle);

	kfree(bundle->state);
	kfree(bundle->cport_desc);
	kfree(bundle);
}
#ifdef CONFIG_PM
/*
 * NOTE: this section was previously guarded by CONFIG_PM_RUNTIME, which was
 * removed from the kernel in v3.19 (runtime PM is covered by CONFIG_PM since
 * then), so the guard was always false and these callbacks were silently
 * compiled out.
 */

/* Disable every connection of the bundle (fallback when the bound driver
 * provides no runtime_suspend callback) */
static void gb_bundle_disable_all_connections(struct gb_bundle *bundle)
{
	struct gb_connection *connection;

	list_for_each_entry(connection, &bundle->connections, bundle_links)
		gb_connection_disable(connection);
}

/* Re-enable every connection of the bundle (fallback when the bound driver
 * provides no runtime_resume callback) */
static void gb_bundle_enable_all_connections(struct gb_bundle *bundle)
{
	struct gb_connection *connection;

	list_for_each_entry(connection, &bundle->connections, bundle_links)
		gb_connection_enable(connection);
}

/*
 * Runtime-suspend the bundle: quiesce locally first (driver callback or
 * connection fallback), then suspend the bundle remotely via the interface's
 * control connection. On remote failure the local suspend is rolled back.
 */
static int gb_bundle_suspend(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);
	const struct dev_pm_ops *pm = dev->driver->pm;
	int ret;

	if (pm && pm->runtime_suspend) {
		ret = pm->runtime_suspend(&bundle->dev);
		if (ret)
			return ret;
	} else {
		gb_bundle_disable_all_connections(bundle);
	}

	ret = gb_control_bundle_suspend(bundle->intf->control, bundle->id);
	if (ret) {
		/* Undo the local suspend before reporting the failure */
		if (pm && pm->runtime_resume)
			ret = pm->runtime_resume(dev);
		else
			gb_bundle_enable_all_connections(bundle);

		return ret;
	}

	return 0;
}

/*
 * Runtime-resume the bundle: resume it remotely via the control connection
 * first, then let the bound driver (or the connection fallback) follow.
 */
static int gb_bundle_resume(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);
	const struct dev_pm_ops *pm = dev->driver->pm;
	int ret;

	ret = gb_control_bundle_resume(bundle->intf->control, bundle->id);
	if (ret)
		return ret;

	if (pm && pm->runtime_resume) {
		ret = pm->runtime_resume(dev);
		if (ret)
			return ret;
	} else {
		gb_bundle_enable_all_connections(bundle);
	}

	return 0;
}

/* Idle callback: request a delayed autosuspend instead of suspending now */
static int gb_bundle_idle(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);

	return 0;
}
#endif
static const struct dev_pm_ops gb_bundle_pm_ops = {
	SET_RUNTIME_PM_OPS(gb_bundle_suspend, gb_bundle_resume, gb_bundle_idle)
};

/* Device type shared by all bundle devices on the greybus bus */
struct device_type greybus_bundle_type = {
	.name =		"greybus_bundle",
	.release =	gb_bundle_release,
	.pm =		&gb_bundle_pm_ops,
};
/*
 * Create a gb_bundle structure to represent a discovered
 * bundle. Returns a pointer to the new bundle or a null
 * pointer if a failure occurs due to memory exhaustion.
 */
struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
				   u8 class)
{
	struct gb_bundle *bundle;

	/* BUNDLE_ID_NONE is reserved and may never be assigned */
	if (bundle_id == BUNDLE_ID_NONE) {
		dev_err(&intf->dev, "can't use bundle id %u\n", bundle_id);
		return NULL;
	}

	/*
	 * Reject any attempt to reuse a bundle id. We initialize
	 * these serially, so there's no need to worry about keeping
	 * the interface bundle list locked here.
	 */
	if (gb_bundle_find(intf, bundle_id)) {
		dev_err(&intf->dev, "duplicate bundle id %u\n", bundle_id);
		return NULL;
	}

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
	if (!bundle)
		return NULL;

	bundle->intf = intf;
	bundle->id = bundle_id;
	bundle->class = class;
	INIT_LIST_HEAD(&bundle->connections);

	/* Initialized but not yet registered; gb_bundle_add() does that */
	bundle->dev.parent = &intf->dev;
	bundle->dev.bus = &greybus_bus_type;
	bundle->dev.type = &greybus_bundle_type;
	bundle->dev.groups = bundle_groups;
	bundle->dev.dma_mask = intf->dev.dma_mask;
	device_initialize(&bundle->dev);
	dev_set_name(&bundle->dev, "%s.%d", dev_name(&intf->dev), bundle_id);

	list_add(&bundle->links, &intf->bundles);

	trace_gb_bundle_create(bundle);

	return bundle;
}
/* Register a previously created bundle with the driver core */
int gb_bundle_add(struct gb_bundle *bundle)
{
	int ret = device_add(&bundle->dev);

	if (ret) {
		dev_err(&bundle->dev, "failed to register bundle: %d\n", ret);
		return ret;
	}

	trace_gb_bundle_add(bundle);

	return 0;
}
/*
 * Tear down a previously set up bundle.
 */
void gb_bundle_destroy(struct gb_bundle *bundle)
{
	trace_gb_bundle_destroy(bundle);

	/* The bundle may not have made it past gb_bundle_create() */
	if (device_is_registered(&bundle->dev))
		device_del(&bundle->dev);

	list_del(&bundle->links);

	/* Final kfree happens in gb_bundle_release() */
	put_device(&bundle->dev);
}

View File

@ -0,0 +1,90 @@
/*
* Greybus bundles
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#ifndef __BUNDLE_H
#define __BUNDLE_H
#include <linux/list.h>
#define BUNDLE_ID_NONE U8_MAX
/* Greybus "public" definitions" */
/* Greybus "public" definitions" */
struct gb_bundle {
	struct device dev;		/* registered on greybus_bus_type */
	struct gb_interface *intf;	/* parent interface */
	u8 id;				/* bundle id, unique per interface */
	u8 class;			/* bundle class code */
	u8 class_major;
	u8 class_minor;
	size_t num_cports;		/* entries in cport_desc[] */
	struct greybus_descriptor_cport *cport_desc;	/* freed on release */
	struct list_head connections;	/* gb_connection.bundle_links */
	u8 *state;			/* string shown via sysfs "state" */
	struct list_head links;	/* interface->bundles */
};
#define to_gb_bundle(d) container_of(d, struct gb_bundle, dev)
/* Greybus "private" definitions" */
struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
u8 class);
int gb_bundle_add(struct gb_bundle *bundle);
void gb_bundle_destroy(struct gb_bundle *bundle);
/* Bundle Runtime PM wrappers */
/*
 * NOTE: these wrappers were guarded by CONFIG_PM_RUNTIME, which was removed
 * from the kernel in v3.19 (folded into CONFIG_PM), so the real
 * implementations were never built; guard on CONFIG_PM instead.
 */
#ifdef CONFIG_PM
/* Take a runtime-PM reference, synchronously resuming the bundle */
static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle)
{
	int retval;

	retval = pm_runtime_get_sync(&bundle->dev);
	if (retval < 0) {
		dev_err(&bundle->dev,
			"pm_runtime_get_sync failed: %d\n", retval);
		/* Balance the usage count taken by pm_runtime_get_sync() */
		pm_runtime_put_noidle(&bundle->dev);
		return retval;
	}

	return 0;
}

/* Drop a runtime-PM reference and schedule an autosuspend */
static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle)
{
	int retval;

	pm_runtime_mark_last_busy(&bundle->dev);
	retval = pm_runtime_put_autosuspend(&bundle->dev);

	return retval;
}

/* Take a reference without resuming the device */
static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle)
{
	pm_runtime_get_noresume(&bundle->dev);
}

/* Drop a reference without triggering an idle check */
static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle)
{
	pm_runtime_put_noidle(&bundle->dev);
}
#else
/* No-op stubs when power management is disabled */
static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle)
{ return 0; }
static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle)
{ return 0; }
static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle) {}
static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle) {}
#endif
#endif /* __BUNDLE_H */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,938 @@
/*
* Greybus connections
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/workqueue.h>
#include "greybus.h"
#include "greybus_trace.h"
/* Timeout handed to the host driver's cport_quiesce callback */
#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT 1000

static void gb_connection_kref_release(struct kref *kref);

/* Protects membership of the hd->connections and bundle->connections lists */
static DEFINE_SPINLOCK(gb_connections_lock);
/* Serialises connection create and destroy */
static DEFINE_MUTEX(gb_connection_mutex);
/* Caller holds gb_connection_mutex. */
static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
{
	struct gb_host_device *hd = intf->hd;
	struct gb_connection *conn;

	list_for_each_entry(conn, &hd->connections, hd_links) {
		if (conn->intf != intf)
			continue;
		if (conn->intf_cport_id == cport_id)
			return true;
	}

	return false;
}
/* Take a reference on the connection */
static void gb_connection_get(struct gb_connection *connection)
{
	kref_get(&connection->kref);

	trace_gb_connection_get(connection);
}
/* Drop a reference; frees the connection via gb_connection_kref_release() */
static void gb_connection_put(struct gb_connection *connection)
{
	trace_gb_connection_put(connection);

	kref_put(&connection->kref, gb_connection_kref_release);
}
/*
 * Returns a reference-counted pointer to the connection if found.
 */
static struct gb_connection *
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
{
	struct gb_connection *connection;
	unsigned long flags;

	spin_lock_irqsave(&gb_connections_lock, flags);
	list_for_each_entry(connection, &hd->connections, hd_links)
		if (connection->hd_cport_id == cport_id) {
			/* Reference taken under the lock; caller must put */
			gb_connection_get(connection);
			goto found;
		}
	connection = NULL;
found:
	spin_unlock_irqrestore(&gb_connections_lock, flags);

	return connection;
}
/*
 * Callback from the host driver to let us know that data has been
 * received on the bundle.
 */
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
		       u8 *data, size_t length)
{
	struct gb_connection *connection;

	trace_gb_hd_in(hd);

	connection = gb_connection_hd_find(hd, cport_id);
	if (!connection) {
		/* No connection for this cport: data is dropped */
		dev_err(&hd->dev,
			"nonexistent connection (%zu bytes dropped)\n", length);
		return;
	}
	gb_connection_recv(connection, data, length);
	/* Balance the reference taken by gb_connection_hd_find() */
	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);
/* kref release callback: runs when the last connection reference is put */
static void gb_connection_kref_release(struct kref *kref)
{
	struct gb_connection *connection;

	connection = container_of(kref, struct gb_connection, kref);

	trace_gb_connection_release(connection);

	kfree(connection);
}
/* Build the "hd_cport/intf:cport" name used in log messages */
static void gb_connection_init_name(struct gb_connection *connection)
{
	u16 cport_id = 0;
	u8 intf_id = 0;

	/* Static connections have no remote interface: 0/0 is reported */
	if (connection->intf) {
		intf_id = connection->intf->interface_id;
		cport_id = connection->intf_cport_id;
	}

	snprintf(connection->name, sizeof(connection->name),
		 "%u/%u:%u", connection->hd_cport_id, intf_id, cport_id);
}
/*
 * _gb_connection_create() - create a Greybus connection
 * @hd: host device of the connection
 * @hd_cport_id: host-device cport id, or -1 for dynamic allocation
 * @intf: remote interface, or NULL for static connections
 * @bundle: remote-interface bundle (may be NULL)
 * @cport_id: remote-interface cport id, or 0 for static connections
 * @handler: request handler (may be NULL)
 * @flags: connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
		      struct gb_interface *intf,
		      struct gb_bundle *bundle, int cport_id,
		      gb_request_handler_t handler,
		      unsigned long flags)
{
	struct gb_connection *connection;
	int ret;

	mutex_lock(&gb_connection_mutex);

	/* Each remote cport may back at most one connection */
	if (intf && gb_connection_cport_in_use(intf, cport_id)) {
		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
		ret = -EBUSY;
		goto err_unlock;
	}

	ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
	if (ret < 0) {
		dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
		goto err_unlock;
	}
	/* On success the allocated host cport id is returned */
	hd_cport_id = ret;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection) {
		ret = -ENOMEM;
		goto err_hd_cport_release;
	}

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;
	connection->bundle = bundle;
	connection->handler = handler;
	connection->flags = flags;

	/* Quirky interfaces cannot do per-cport flow control */
	if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
		connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;

	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	mutex_init(&connection->mutex);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	/* Single-threaded workqueue per connection for operation completion */
	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(&hd->dev), hd_cport_id);
	if (!connection->wq) {
		ret = -ENOMEM;
		goto err_free_connection;
	}

	kref_init(&connection->kref);

	gb_connection_init_name(connection);

	spin_lock_irq(&gb_connections_lock);
	list_add(&connection->hd_links, &hd->connections);

	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irq(&gb_connections_lock);

	mutex_unlock(&gb_connection_mutex);

	trace_gb_connection_create(connection);

	return connection;

err_free_connection:
	kfree(connection);
err_hd_cport_release:
	gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
	mutex_unlock(&gb_connection_mutex);

	return ERR_PTR(ret);
}
/* Create a static (SVC/host) connection with no remote interface */
struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
			    gb_request_handler_t handler)
{
	return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}
/* Create an interface's control connection (dynamic host cport, no bundle) */
struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
	return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
				     GB_CONNECTION_FLAG_CONTROL |
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}
/* Create an ordinary bundle connection with a dynamic host cport */
struct gb_connection *
gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
		     gb_request_handler_t handler)
{
	struct gb_interface *intf = bundle->intf;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, 0);
}
EXPORT_SYMBOL_GPL(gb_connection_create);
/* As gb_connection_create(), but with caller-supplied (non-core) flags */
struct gb_connection *
gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
			   gb_request_handler_t handler,
			   unsigned long flags)
{
	struct gb_interface *intf = bundle->intf;

	/* Core-internal flags may not be set by drivers; strip them */
	if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
		flags &= ~GB_CONNECTION_FLAG_CORE_MASK;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_flags);
/* Create an offloaded connection: traffic handled by the host driver,
 * hence no request handler */
struct gb_connection *
gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
			       unsigned long flags)
{
	flags |= GB_CONNECTION_FLAG_OFFLOADED;

	return gb_connection_create_flags(bundle, cport_id, NULL, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);
/* Enable the connection's host cport via the (optional) driver callback */
static int gb_connection_hd_cport_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_enable)
		return 0;

	ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
				       connection->flags);
	if (!ret)
		return 0;

	dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
		connection->name, ret);

	return ret;
}
/* Disable the connection's host cport; failures are only logged */
static void gb_connection_hd_cport_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_disable)
		return;

	ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
	if (ret)
		dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
			connection->name, ret);
}
/* Tell the host driver the cport is now connected (optional callback) */
static int gb_connection_hd_cport_connected(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_connected)
		return 0;

	ret = hd->driver->cport_connected(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to set connected state: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}
/* Ask the host driver to flush the cport's transmit path (optional) */
static int gb_connection_hd_cport_flush(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_flush)
		return 0;

	ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}
/*
 * Quiesce the host cport, reserving enough peer buffer space for the
 * shutdown handshake (plus an extra header when a mode switch is pending).
 */
static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	size_t peer_space;
	int ret;

	peer_space = sizeof(struct gb_operation_msg_hdr) +
			sizeof(struct gb_cport_shutdown_request);

	if (connection->mode_switch)
		peer_space += sizeof(struct gb_operation_msg_hdr);

	/*
	 * NOTE(review): unlike the other hd callbacks above, cport_quiesce
	 * is invoked without a NULL check -- presumably mandatory for all
	 * host drivers; confirm.
	 */
	ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
					peer_space,
					GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}
/*
 * Clear the host cport state. NOTE(review): called without a NULL check,
 * so cport_clear is presumably a mandatory host-driver callback; confirm.
 */
static int gb_connection_hd_cport_clear(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	ret = hd->driver->cport_clear(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to clear host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}
/*
 * Request the SVC to create a connection from AP's cport to interface's
 * cport.
 */
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_interface *intf;
	u8 cport_flags;
	int ret;

	/* Static connections need no SVC routing */
	if (gb_connection_is_static(connection))
		return 0;

	intf = connection->intf;

	/*
	 * Enable either E2EFC or CSD, unless no flow control is requested.
	 */
	cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
	if (gb_connection_flow_control_disabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
	} else if (gb_connection_e2efc_enabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
				GB_SVC_CPORT_FLAG_E2EFC;
	}

	ret = gb_svc_connection_create(hd->svc,
				       hd->svc->ap_intf_id,
				       connection->hd_cport_id,
				       intf->interface_id,
				       connection->intf_cport_id,
				       cport_flags);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to create svc connection: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}
/* Tear down the SVC route set up by gb_connection_svc_connection_create() */
static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
	if (gb_connection_is_static(connection))
		return;

	gb_svc_connection_destroy(connection->hd->svc,
				  connection->hd->svc->ap_intf_id,
				  connection->hd_cport_id,
				  connection->intf->interface_id,
				  connection->intf_cport_id);
}
/* Inform Interface about active CPorts */
static int gb_connection_control_connected(struct gb_connection *connection)
{
	u16 cport_id = connection->intf_cport_id;
	struct gb_control *control;
	int ret;

	/* Neither static nor control connections notify the interface */
	if (gb_connection_is_static(connection) ||
	    gb_connection_is_control(connection))
		return 0;

	control = connection->intf->control;

	ret = gb_control_connected_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->bundle->dev,
			"failed to connect cport: %d\n", ret);
		return ret;
	}

	return 0;
}
/* Notify the interface that the cport is being torn down (best effort) */
static void
gb_connection_control_disconnecting(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	ret = gb_control_disconnecting_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send disconnecting: %d\n",
			connection->name, ret);
	}
}
/* Notify the interface that the cport is now disconnected */
static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	/* Control connections may instead trigger a pending mode switch */
	if (gb_connection_is_control(connection)) {
		if (connection->mode_switch) {
			ret = gb_control_mode_switch_operation(control);
			if (ret) {
				/*
				 * Allow mode switch to time out waiting for
				 * mailbox event.
				 */
				return;
			}
		}

		return;
	}

	ret = gb_control_disconnected_operation(control, cport_id);
	if (ret) {
		dev_warn(&connection->bundle->dev,
			 "failed to disconnect cport: %d\n", ret);
	}
}
/* Send a synchronous CPORT_SHUTDOWN request for the given phase */
static int gb_connection_shutdown_operation(struct gb_connection *connection,
					    u8 phase)
{
	struct gb_cport_shutdown_request *req;
	struct gb_operation *operation;
	int ret;

	/* Core operation: bypasses the usual connection-state checks */
	operation = gb_operation_create_core(connection,
					     GB_REQUEST_TYPE_CPORT_SHUTDOWN,
					     sizeof(*req), 0, 0,
					     GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	req = operation->request->payload;
	req->phase = phase;

	ret = gb_operation_request_send_sync(operation);

	gb_operation_put(operation);

	return ret;
}
/*
 * Run one phase of the cport shutdown handshake, either through the host
 * driver (offloaded connections) or as a regular shutdown operation.
 */
static int gb_connection_cport_shutdown(struct gb_connection *connection,
					u8 phase)
{
	struct gb_host_device *hd = connection->hd;
	const struct gb_hd_driver *drv = hd->driver;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_offloaded(connection)) {
		/* Host driver owns the traffic; its callback is optional */
		if (!drv->cport_shutdown)
			return 0;

		ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase,
					  GB_OPERATION_TIMEOUT_DEFAULT);
	} else {
		ret = gb_connection_shutdown_operation(connection, phase);
	}

	if (ret) {
		dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n",
			connection->name, phase, ret);
		return ret;
	}

	return 0;
}
/* Phase-1 shutdown: convenience wrapper */
static int
gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 1);
}
/* Phase-2 shutdown: convenience wrapper */
static int
gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 2);
}
/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED or
 * DISCONNECTING.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
					    int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;

	while (!list_empty(&connection->operations)) {
		operation = list_last_entry(&connection->operations,
					    struct gb_operation, links);
		/* Hold a ref so the operation survives the unlocked window */
		gb_operation_get(operation);
		/* Cancelling may sleep, so drop the spinlock around it */
		spin_unlock_irq(&connection->lock);

		if (gb_operation_is_incoming(operation))
			gb_operation_cancel_incoming(operation, errno);
		else
			gb_operation_cancel(operation, errno);

		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}
/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
					int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;
	bool incoming;

	while (!list_empty(&connection->operations)) {
		incoming = false;
		/* Find the next incoming operation; outgoing ones are kept */
		list_for_each_entry(operation, &connection->operations,
				    links) {
			if (gb_operation_is_incoming(operation)) {
				gb_operation_get(operation);
				incoming = true;
				break;
			}
		}

		if (!incoming)
			break;

		/* Cancelling may sleep, so drop the spinlock around it */
		spin_unlock_irq(&connection->lock);

		/* FIXME: flush, not cancel? */
		gb_operation_cancel_incoming(operation, errno);
		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}
/*
 * _gb_connection_enable() - enable a connection
 * @connection: connection to enable
 * @rx: whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
	int ret;

	/* Handle ENABLED_TX -> ENABLED transitions. */
	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
		/* Nothing to do unless rx is being turned on now */
		if (!(connection->handler && rx))
			return 0;

		spin_lock_irq(&connection->lock);
		connection->state = GB_CONNECTION_STATE_ENABLED;
		spin_unlock_irq(&connection->lock);

		return 0;
	}

	ret = gb_connection_hd_cport_enable(connection);
	if (ret)
		return ret;

	ret = gb_connection_svc_connection_create(connection);
	if (ret)
		goto err_hd_cport_clear;

	ret = gb_connection_hd_cport_connected(connection);
	if (ret)
		goto err_svc_connection_destroy;

	/* Incoming requests need a handler to reach ENABLED */
	spin_lock_irq(&connection->lock);
	if (connection->handler && rx)
		connection->state = GB_CONNECTION_STATE_ENABLED;
	else
		connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	spin_unlock_irq(&connection->lock);

	ret = gb_connection_control_connected(connection);
	if (ret)
		goto err_control_disconnecting;

	return 0;

err_control_disconnecting:
	/* Full teardown: mirrors the gb_connection_disable() sequence */
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	/* Transmit queue should already be empty. */
	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);
	connection->state = GB_CONNECTION_STATE_DISABLED;
err_svc_connection_destroy:
	gb_connection_svc_connection_destroy(connection);
err_hd_cport_clear:
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	return ret;
}
/* Enable a connection for both transmit and incoming requests */
int gb_connection_enable(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	/* Already fully enabled: nothing to do */
	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
		ret = _gb_connection_enable(connection, true);
		if (!ret)
			trace_gb_connection_enable(connection);
	}

	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable);
/*
 * Enable a connection for outgoing requests only. Fails with -EINVAL when
 * the connection is already fully enabled; idempotent when already tx-only.
 */
int gb_connection_enable_tx(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	switch (connection->state) {
	case GB_CONNECTION_STATE_ENABLED:
		/* Downgrading a fully-enabled connection is not allowed. */
		ret = -EINVAL;
		break;
	case GB_CONNECTION_STATE_ENABLED_TX:
		/* Already in the requested state. */
		break;
	default:
		ret = _gb_connection_enable(connection, false);
		if (!ret)
			trace_gb_connection_enable(connection);
		break;
	}

	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
/* Downgrade a fully-enabled connection to tx-only (ENABLED -> ENABLED_TX). */
void gb_connection_disable_rx(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	spin_lock_irq(&connection->lock);
	/* Only a fully-enabled connection has rx to shut down. */
	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
		spin_unlock_irq(&connection->lock);
		goto out_unlock;
	}
	connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	/* Flush pending incoming operations while still holding the lock. */
	gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	trace_gb_connection_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_rx);
void gb_connection_mode_switch_prepare(struct gb_connection *connection)
{
	/*
	 * Flag that cport/SVC teardown must be deferred; see the
	 * mode_switch check in gb_connection_disable().
	 */
	connection->mode_switch = true;
}

void gb_connection_mode_switch_complete(struct gb_connection *connection)
{
	/* Perform the teardown that gb_connection_disable() deferred. */
	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);
	gb_connection_hd_cport_disable(connection);
	connection->mode_switch = false;
}
/* Disable a connection, performing the orderly shutdown handshake. */
void gb_connection_disable(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	/* Stop accepting work and cancel in-flight operations. */
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	/* Two-phase shutdown handshake with the remote end. */
	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);
	connection->state = GB_CONNECTION_STATE_DISABLED;

	/* control-connection tear down is deferred when mode switching */
	if (!connection->mode_switch) {
		gb_connection_svc_connection_destroy(connection);
		gb_connection_hd_cport_clear(connection);
		gb_connection_hd_cport_disable(connection);
	}

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);
/* Disable a connection without communicating with the remote end. */
void gb_connection_disable_forced(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	/*
	 * Go straight to DISABLED -- no DISCONNECTING handshake, since the
	 * remote end may no longer be reachable.
	 */
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);
	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);
	gb_connection_hd_cport_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_forced);
/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
	if (!connection)
		return;

	/* Warn about -- and recover from -- callers that forgot to disable. */
	if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
		gb_connection_disable(connection);

	mutex_lock(&gb_connection_mutex);

	/* Unlink from the host-device and bundle connection lists. */
	spin_lock_irq(&gb_connections_lock);
	list_del(&connection->bundle_links);
	list_del(&connection->hd_links);
	spin_unlock_irq(&gb_connections_lock);

	destroy_workqueue(connection->wq);

	/* Return the host cport id and mark ours invalid. */
	gb_hd_cport_release(connection->hd, connection->hd_cport_id);
	connection->hd_cport_id = CPORT_ID_BAD;

	mutex_unlock(&gb_connection_mutex);

	/* Drop our reference on the connection. */
	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);
/* Enable latency tagging for the connection's cport, if the host driver
 * implements it; failures are only logged. */
void gb_connection_latency_tag_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int status;

	/* Latency tagging is an optional host-driver capability. */
	if (!hd->driver->latency_tag_enable)
		return;

	status = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
	if (status)
		dev_err(&connection->hd->dev,
			"%s: failed to enable latency tag: %d\n",
			connection->name, status);
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);

/* Disable latency tagging for the connection's cport, if supported. */
void gb_connection_latency_tag_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int status;

	if (!hd->driver->latency_tag_disable)
		return;

	status = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
	if (status)
		dev_err(&connection->hd->dev,
			"%s: failed to disable latency tag: %d\n",
			connection->name, status);
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);

View File

@ -0,0 +1,129 @@
/*
* Greybus connections
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#ifndef __CONNECTION_H
#define __CONNECTION_H

#include <linux/list.h>
#include <linux/kfifo.h>

/*
 * Connection flags. CSD disables end-to-end flow control (see
 * gb_connection_e2efc_enabled() below); CONTROL marks the per-interface
 * control connection. CDSI1 and HIGH_PRIO semantics are defined by the
 * host drivers -- see their users.
 */
#define GB_CONNECTION_FLAG_CSD		BIT(0)
#define GB_CONNECTION_FLAG_NO_FLOWCTRL	BIT(1)
#define GB_CONNECTION_FLAG_OFFLOADED	BIT(2)
#define GB_CONNECTION_FLAG_CDSI1	BIT(3)
#define GB_CONNECTION_FLAG_CONTROL	BIT(4)
#define GB_CONNECTION_FLAG_HIGH_PRIO	BIT(5)

/* Flags reserved for the greybus core itself. */
#define GB_CONNECTION_FLAG_CORE_MASK	GB_CONNECTION_FLAG_CONTROL

/* Connection life-cycle states; transitions are made in connection.c. */
enum gb_connection_state {
	GB_CONNECTION_STATE_DISABLED = 0,	/* fully down */
	GB_CONNECTION_STATE_ENABLED_TX = 1,	/* outgoing requests only */
	GB_CONNECTION_STATE_ENABLED = 2,	/* outgoing and incoming */
	GB_CONNECTION_STATE_DISCONNECTING = 3,	/* shutdown in progress */
};

struct gb_operation;

/* Handler invoked for incoming requests on a connection. */
typedef int (*gb_request_handler_t)(struct gb_operation *);

struct gb_connection {
	struct gb_host_device		*hd;
	struct gb_interface		*intf;
	struct gb_bundle		*bundle;
	struct kref			kref;
	u16				hd_cport_id;	/* host-side cport id */
	u16				intf_cport_id;	/* interface-side cport id */

	struct list_head		hd_links;	/* entry in hd's connection list */
	struct list_head		bundle_links;	/* entry in bundle's connection list */

	gb_request_handler_t		handler;	/* NULL on tx-only connections */
	unsigned long			flags;		/* GB_CONNECTION_FLAG_* */

	struct mutex			mutex;		/* serializes enable/disable */
	spinlock_t			lock;		/* protects state changes */
	enum gb_connection_state	state;
	struct list_head		operations;

	char				name[16];
	struct workqueue_struct		*wq;

	atomic_t			op_cycle;

	void				*private;	/* client data; use accessors below */

	bool				mode_switch;	/* defer teardown across mode switch */
};

/* Constructors; destroy with gb_connection_destroy(). */
struct gb_connection *gb_connection_create_static(struct gb_host_device *hd,
				u16 hd_cport_id, gb_request_handler_t handler);
struct gb_connection *gb_connection_create_control(struct gb_interface *intf);
struct gb_connection *gb_connection_create(struct gb_bundle *bundle,
				u16 cport_id, gb_request_handler_t handler);
struct gb_connection *gb_connection_create_flags(struct gb_bundle *bundle,
				u16 cport_id, gb_request_handler_t handler,
				unsigned long flags);
struct gb_connection *gb_connection_create_offloaded(struct gb_bundle *bundle,
				u16 cport_id, unsigned long flags);
void gb_connection_destroy(struct gb_connection *connection);

/* A static connection is one without an associated interface. */
static inline bool gb_connection_is_static(struct gb_connection *connection)
{
	return !connection->intf;
}

int gb_connection_enable(struct gb_connection *connection);
int gb_connection_enable_tx(struct gb_connection *connection);
void gb_connection_disable_rx(struct gb_connection *connection);
void gb_connection_disable(struct gb_connection *connection);
void gb_connection_disable_forced(struct gb_connection *connection);

void gb_connection_mode_switch_prepare(struct gb_connection *connection);
void gb_connection_mode_switch_complete(struct gb_connection *connection);

/* Host-driver entry point for data received on a cport. */
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
			u8 *data, size_t length);

void gb_connection_latency_tag_enable(struct gb_connection *connection);
void gb_connection_latency_tag_disable(struct gb_connection *connection);

/* End-to-end flow control is on unless the CSD flag was set. */
static inline bool gb_connection_e2efc_enabled(struct gb_connection *connection)
{
	return !(connection->flags & GB_CONNECTION_FLAG_CSD);
}

static inline bool
gb_connection_flow_control_disabled(struct gb_connection *connection)
{
	return connection->flags & GB_CONNECTION_FLAG_NO_FLOWCTRL;
}

static inline bool gb_connection_is_offloaded(struct gb_connection *connection)
{
	return connection->flags & GB_CONNECTION_FLAG_OFFLOADED;
}

static inline bool gb_connection_is_control(struct gb_connection *connection)
{
	return connection->flags & GB_CONNECTION_FLAG_CONTROL;
}

/* Accessors for the client-owned private pointer. */
static inline void *gb_connection_get_data(struct gb_connection *connection)
{
	return connection->private;
}

static inline void gb_connection_set_data(struct gb_connection *connection,
					  void *data)
{
	connection->private = data;
}

#endif /* __CONNECTION_H */

View File

@ -0,0 +1,635 @@
/*
* Greybus CPort control protocol.
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "greybus.h"
/* Highest control-protocol version supported */
#define GB_CONTROL_VERSION_MAJOR 0
#define GB_CONTROL_VERSION_MINOR 1
/*
 * Negotiate the control-protocol version with the interface and cache it
 * in @control. Fails with -ENOTSUPP if the interface reports a newer
 * major version than we support.
 */
static int gb_control_get_version(struct gb_control *control)
{
	struct gb_interface *intf = control->connection->intf;
	struct gb_control_version_request request;
	struct gb_control_version_response response;
	int ret;

	request.major = GB_CONTROL_VERSION_MAJOR;
	request.minor = GB_CONTROL_VERSION_MINOR;

	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_VERSION,
				&request, sizeof(request), &response,
				sizeof(response));
	if (ret) {
		dev_err(&intf->dev,
			"failed to get control-protocol version: %d\n",
			ret);
		return ret;
	}

	/* A higher major version on the remote end is incompatible. */
	if (response.major > request.major) {
		dev_err(&intf->dev,
			"unsupported major control-protocol version (%u > %u)\n",
			response.major, request.major);
		return -ENOTSUPP;
	}

	control->protocol_major = response.major;
	control->protocol_minor = response.minor;

	dev_dbg(&intf->dev, "%s - %u.%u\n", __func__, response.major,
		response.minor);

	return 0;
}
/* Query one bundle's class version and cache it on the bundle. */
static int gb_control_get_bundle_version(struct gb_control *control,
					 struct gb_bundle *bundle)
{
	struct gb_interface *intf = control->connection->intf;
	struct gb_control_bundle_version_request request;
	struct gb_control_bundle_version_response response;
	int ret;

	request.bundle_id = bundle->id;

	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_BUNDLE_VERSION,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&intf->dev,
			"failed to get bundle %u class version: %d\n",
			bundle->id, ret);
		return ret;
	}

	bundle->class_major = response.major;
	bundle->class_minor = response.minor;

	dev_dbg(&intf->dev, "%s - %u: %u.%u\n", __func__, bundle->id,
		response.major, response.minor);

	return 0;
}

/*
 * Query the class version of every bundle of the interface. A no-op when
 * the negotiated control protocol lacks the bundle-version operation
 * (see gb_control_enable()).
 */
int gb_control_get_bundle_versions(struct gb_control *control)
{
	struct gb_interface *intf = control->connection->intf;
	struct gb_bundle *bundle;
	int ret;

	if (!control->has_bundle_version)
		return 0;

	list_for_each_entry(bundle, &intf->bundles, links) {
		ret = gb_control_get_bundle_version(control, bundle);
		if (ret)
			return ret;
	}

	return 0;
}
/* Get Manifest's size from the interface */
int gb_control_get_manifest_size_operation(struct gb_interface *intf)
{
	struct gb_control_get_manifest_size_response response;
	struct gb_connection *connection = intf->control->connection;
	int ret;

	ret = gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST_SIZE,
				NULL, 0, &response, sizeof(response));
	if (ret) {
		dev_err(&connection->intf->dev,
			"failed to get manifest size: %d\n", ret);
		return ret;
	}

	/* Size is transferred little-endian; return it in host order. */
	return le16_to_cpu(response.size);
}

/* Reads Manifest from the interface */
int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
				      size_t size)
{
	struct gb_connection *connection = intf->control->connection;

	/* Caller supplies the buffer, sized via the operation above. */
	return gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST,
				 NULL, 0, manifest, size);
}

/* Notify the interface that @cport_id has been connected. */
int gb_control_connected_operation(struct gb_control *control, u16 cport_id)
{
	struct gb_control_connected_request request;

	request.cport_id = cpu_to_le16(cport_id);
	return gb_operation_sync(control->connection, GB_CONTROL_TYPE_CONNECTED,
				 &request, sizeof(request), NULL, 0);
}

/* Notify the interface that @cport_id has been disconnected. */
int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id)
{
	struct gb_control_disconnected_request request;

	request.cport_id = cpu_to_le16(cport_id);
	return gb_operation_sync(control->connection,
				 GB_CONTROL_TYPE_DISCONNECTED, &request,
				 sizeof(request), NULL, 0);
}
/*
 * Send a disconnecting request for @cport_id.
 *
 * NOTE(review): created via gb_operation_create_core(), presumably so the
 * request can still be sent while the connection is shutting down -- see
 * the DISCONNECTING path in connection.c; confirm against operation core.
 */
int gb_control_disconnecting_operation(struct gb_control *control,
				       u16 cport_id)
{
	struct gb_control_disconnecting_request *request;
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_core(control->connection,
					     GB_CONTROL_TYPE_DISCONNECTING,
					     sizeof(*request), 0, 0,
					     GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
	request->cport_id = cpu_to_le16(cport_id);

	ret = gb_operation_request_send_sync(operation);
	if (ret) {
		dev_err(&control->dev, "failed to send disconnecting: %d\n",
			ret);
	}

	gb_operation_put(operation);

	return ret;
}

/*
 * Send a mode-switch request. The operation is flagged
 * GB_OPERATION_FLAG_UNIDIRECTIONAL, i.e. no response is requested.
 */
int gb_control_mode_switch_operation(struct gb_control *control)
{
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_core(control->connection,
					     GB_CONTROL_TYPE_MODE_SWITCH,
					     0, 0, GB_OPERATION_FLAG_UNIDIRECTIONAL,
					     GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	ret = gb_operation_request_send_sync(operation);
	if (ret)
		dev_err(&control->dev, "failed to send mode switch: %d\n", ret);

	gb_operation_put(operation);

	return ret;
}
/* Enable timesync on the interface: @count strobes, little-endian fields. */
int gb_control_timesync_enable(struct gb_control *control, u8 count,
			       u64 frame_time, u32 strobe_delay, u32 refclk)
{
	struct gb_control_timesync_enable_request request;

	request.count = count;
	request.frame_time = cpu_to_le64(frame_time);
	request.strobe_delay = cpu_to_le32(strobe_delay);
	request.refclk = cpu_to_le32(refclk);
	return gb_operation_sync(control->connection,
				 GB_CONTROL_TYPE_TIMESYNC_ENABLE, &request,
				 sizeof(request), NULL, 0);
}

/* Disable timesync on the interface. */
int gb_control_timesync_disable(struct gb_control *control)
{
	return gb_operation_sync(control->connection,
				 GB_CONTROL_TYPE_TIMESYNC_DISABLE, NULL, 0,
				 NULL, 0);
}

/*
 * Fetch the frame time of the interface's last timesync event.
 * *@frame_time is updated only on success.
 */
int gb_control_timesync_get_last_event(struct gb_control *control,
				       u64 *frame_time)
{
	struct gb_control_timesync_get_last_event_response response;
	int ret;

	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_TIMESYNC_GET_LAST_EVENT,
				NULL, 0, &response, sizeof(response));
	if (!ret)
		*frame_time = le64_to_cpu(response.frame_time);
	return ret;
}

/*
 * Push the authoritative frame times to the interface. @frame_time must
 * point at an array of (at least) GB_TIMESYNC_MAX_STROBES entries.
 */
int gb_control_timesync_authoritative(struct gb_control *control,
				      u64 *frame_time)
{
	struct gb_control_timesync_authoritative_request request;
	int i;

	for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
		request.frame_time[i] = cpu_to_le64(frame_time[i]);

	return gb_operation_sync(control->connection,
				 GB_CONTROL_TYPE_TIMESYNC_AUTHORITATIVE,
				 &request, sizeof(request),
				 NULL, 0);
}
/* Map a bundle-PM protocol status code to a negative errno. */
static int gb_control_bundle_pm_status_map(u8 status)
{
	if (status == GB_CONTROL_BUNDLE_PM_INVAL)
		return -EINVAL;
	if (status == GB_CONTROL_BUNDLE_PM_BUSY)
		return -EBUSY;
	if (status == GB_CONTROL_BUNDLE_PM_NA)
		return -ENOMSG;

	/* GB_CONTROL_BUNDLE_PM_FAIL and anything unrecognized. */
	return -EREMOTEIO;
}
/*
 * Bundle power-management requests. Each helper below follows the same
 * pattern: send the request, return transport errors as-is, and map a
 * non-OK protocol status to an errno via gb_control_bundle_pm_status_map().
 */
int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id)
{
	struct gb_control_bundle_pm_request request;
	struct gb_control_bundle_pm_response response;
	int ret;

	request.bundle_id = bundle_id;
	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_BUNDLE_SUSPEND, &request,
				sizeof(request), &response, sizeof(response));
	if (ret) {
		dev_err(&control->dev, "failed to send bundle %u suspend: %d\n",
			bundle_id, ret);
		return ret;
	}

	if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
		dev_err(&control->dev, "failed to suspend bundle %u: %d\n",
			bundle_id, response.status);
		return gb_control_bundle_pm_status_map(response.status);
	}

	return 0;
}

int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id)
{
	struct gb_control_bundle_pm_request request;
	struct gb_control_bundle_pm_response response;
	int ret;

	request.bundle_id = bundle_id;
	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_BUNDLE_RESUME, &request,
				sizeof(request), &response, sizeof(response));
	if (ret) {
		dev_err(&control->dev, "failed to send bundle %u resume: %d\n",
			bundle_id, ret);
		return ret;
	}

	if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
		dev_err(&control->dev, "failed to resume bundle %u: %d\n",
			bundle_id, response.status);
		return gb_control_bundle_pm_status_map(response.status);
	}

	return 0;
}

int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id)
{
	struct gb_control_bundle_pm_request request;
	struct gb_control_bundle_pm_response response;
	int ret;

	request.bundle_id = bundle_id;
	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_BUNDLE_DEACTIVATE, &request,
				sizeof(request), &response, sizeof(response));
	if (ret) {
		dev_err(&control->dev,
			"failed to send bundle %u deactivate: %d\n", bundle_id,
			ret);
		return ret;
	}

	if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
		dev_err(&control->dev, "failed to deactivate bundle %u: %d\n",
			bundle_id, response.status);
		return gb_control_bundle_pm_status_map(response.status);
	}

	return 0;
}

/*
 * Activation is optional: when the interface lacks bundle-activate support
 * (see gb_control_enable()) this is a successful no-op.
 */
int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id)
{
	struct gb_control_bundle_pm_request request;
	struct gb_control_bundle_pm_response response;
	int ret;

	if (!control->has_bundle_activate)
		return 0;

	request.bundle_id = bundle_id;
	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_BUNDLE_ACTIVATE, &request,
				sizeof(request), &response, sizeof(response));
	if (ret) {
		dev_err(&control->dev,
			"failed to send bundle %u activate: %d\n", bundle_id,
			ret);
		return ret;
	}

	if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
		dev_err(&control->dev, "failed to activate bundle %u: %d\n",
			bundle_id, response.status);
		return gb_control_bundle_pm_status_map(response.status);
	}

	return 0;
}
/* Map an interface-PM protocol status code to a negative errno. */
static int gb_control_interface_pm_status_map(u8 status)
{
	if (status == GB_CONTROL_INTF_PM_BUSY)
		return -EBUSY;
	if (status == GB_CONTROL_INTF_PM_NA)
		return -ENOMSG;

	/* Anything else is treated as a remote failure. */
	return -EREMOTEIO;
}
/*
 * Interface power-management requests: same transport-error/protocol-status
 * pattern as the bundle helpers above, with status codes mapped by
 * gb_control_interface_pm_status_map().
 */
int gb_control_interface_suspend_prepare(struct gb_control *control)
{
	struct gb_control_intf_pm_response response;
	int ret;

	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE, NULL, 0,
				&response, sizeof(response));
	if (ret) {
		dev_err(&control->dev,
			"failed to send interface suspend prepare: %d\n", ret);
		return ret;
	}

	if (response.status != GB_CONTROL_INTF_PM_OK) {
		dev_err(&control->dev, "interface error while preparing suspend: %d\n",
			response.status);
		return gb_control_interface_pm_status_map(response.status);
	}

	return 0;
}

int gb_control_interface_deactivate_prepare(struct gb_control *control)
{
	struct gb_control_intf_pm_response response;
	int ret;

	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE, NULL,
				0, &response, sizeof(response));
	if (ret) {
		dev_err(&control->dev, "failed to send interface deactivate prepare: %d\n",
			ret);
		return ret;
	}

	if (response.status != GB_CONTROL_INTF_PM_OK) {
		dev_err(&control->dev, "interface error while preparing deactivate: %d\n",
			response.status);
		return gb_control_interface_pm_status_map(response.status);
	}

	return 0;
}

int gb_control_interface_hibernate_abort(struct gb_control *control)
{
	struct gb_control_intf_pm_response response;
	int ret;

	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT, NULL, 0,
				&response, sizeof(response));
	if (ret) {
		dev_err(&control->dev,
			"failed to send interface aborting hibernate: %d\n",
			ret);
		return ret;
	}

	if (response.status != GB_CONTROL_INTF_PM_OK) {
		dev_err(&control->dev, "interface error while aborting hibernate: %d\n",
			response.status);
		return gb_control_interface_pm_status_map(response.status);
	}

	return 0;
}
/* sysfs attribute: the interface's vendor string. */
static ssize_t vendor_string_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct gb_control *control = to_gb_control(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", control->vendor_string);
}
static DEVICE_ATTR_RO(vendor_string);

/* sysfs attribute: the interface's product string. */
static ssize_t product_string_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gb_control *control = to_gb_control(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", control->product_string);
}
static DEVICE_ATTR_RO(product_string);

static struct attribute *control_attrs[] = {
	&dev_attr_vendor_string.attr,
	&dev_attr_product_string.attr,
	NULL,
};
ATTRIBUTE_GROUPS(control);

/*
 * Device release callback: destroys the control connection and frees the
 * descriptor strings and the control structure itself.
 */
static void gb_control_release(struct device *dev)
{
	struct gb_control *control = to_gb_control(dev);

	gb_connection_destroy(control->connection);

	kfree(control->vendor_string);
	kfree(control->product_string);

	kfree(control);
}

struct device_type greybus_control_type = {
	.name = "greybus_control",
	.release = gb_control_release,
};
/*
 * Allocate and initialize a control device for @intf, including its (not
 * yet enabled) control connection. The device is initialized but not
 * registered here -- see gb_control_add(). Returns an ERR_PTR() on error.
 */
struct gb_control *gb_control_create(struct gb_interface *intf)
{
	struct gb_connection *connection;
	struct gb_control *control;

	control = kzalloc(sizeof(*control), GFP_KERNEL);
	if (!control)
		return ERR_PTR(-ENOMEM);

	control->intf = intf;

	connection = gb_connection_create_control(intf);
	if (IS_ERR(connection)) {
		dev_err(&intf->dev,
			"failed to create control connection: %ld\n",
			PTR_ERR(connection));
		kfree(control);
		return ERR_CAST(connection);
	}

	control->connection = connection;

	control->dev.parent = &intf->dev;
	control->dev.bus = &greybus_bus_type;
	control->dev.type = &greybus_control_type;
	control->dev.groups = control_groups;
	control->dev.dma_mask = intf->dev.dma_mask;
	device_initialize(&control->dev);
	dev_set_name(&control->dev, "%s.ctrl", dev_name(&intf->dev));

	/* Let the connection find its owning control structure. */
	gb_connection_set_data(control->connection, control);

	return control;
}
/*
 * Bring up the control connection (tx-only) and negotiate the protocol
 * version; bundle-version support is implied by versions newer than 0.1.
 */
int gb_control_enable(struct gb_control *control)
{
	int ret;

	dev_dbg(&control->connection->intf->dev, "%s\n", __func__);

	ret = gb_connection_enable_tx(control->connection);
	if (ret) {
		dev_err(&control->connection->intf->dev,
			"failed to enable control connection: %d\n",
			ret);
		return ret;
	}

	ret = gb_control_get_version(control);
	if (ret)
		goto err_disable_connection;

	if (control->protocol_major > 0 || control->protocol_minor > 1)
		control->has_bundle_version = true;

	/* FIXME: use protocol version instead */
	if (!(control->intf->quirks & GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE))
		control->has_bundle_activate = true;

	return 0;

err_disable_connection:
	gb_connection_disable(control->connection);

	return ret;
}
void gb_control_disable(struct gb_control *control)
{
dev_dbg(&control->connection->intf->dev, "%s\n", __func__);
if (control->intf->disconnected)
gb_connection_disable_forced(control->connection);
else
gb_connection_disable(control->connection);
}
/* Suspend: simply take down the control connection. */
int gb_control_suspend(struct gb_control *control)
{
	gb_connection_disable(control->connection);

	return 0;
}

/* Resume: re-enable the control connection (tx-only, as at enable time). */
int gb_control_resume(struct gb_control *control)
{
	int ret;

	ret = gb_connection_enable_tx(control->connection);
	if (ret) {
		dev_err(&control->connection->intf->dev,
			"failed to enable control connection: %d\n", ret);
		return ret;
	}

	return 0;
}
/* Register the control device created by gb_control_create(). */
int gb_control_add(struct gb_control *control)
{
	int ret;

	ret = device_add(&control->dev);
	if (ret) {
		dev_err(&control->dev,
			"failed to register control device: %d\n",
			ret);
		return ret;
	}

	return 0;
}

/* Unregister the control device; safe to call if it was never added. */
void gb_control_del(struct gb_control *control)
{
	if (device_is_registered(&control->dev))
		device_del(&control->dev);
}

/* Take a reference on the control device. */
struct gb_control *gb_control_get(struct gb_control *control)
{
	get_device(&control->dev);

	return control;
}

/* Drop a reference on the control device. */
void gb_control_put(struct gb_control *control)
{
	put_device(&control->dev);
}

/* Thin wrappers around the connection mode-switch helpers. */
void gb_control_mode_switch_prepare(struct gb_control *control)
{
	gb_connection_mode_switch_prepare(control->connection);
}

void gb_control_mode_switch_complete(struct gb_control *control)
{
	gb_connection_mode_switch_complete(control->connection);
}

View File

@ -0,0 +1,65 @@
/*
* Greybus CPort control protocol
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#ifndef __CONTROL_H
#define __CONTROL_H

/*
 * Per-interface control device, wrapping the connection used to speak the
 * CPort control protocol to the interface.
 */
struct gb_control {
	struct device dev;
	struct gb_interface *intf;

	struct gb_connection *connection;

	/* Negotiated control-protocol version (see gb_control_enable()). */
	u8 protocol_major;
	u8 protocol_minor;

	/* Optional protocol features detected at enable time. */
	bool has_bundle_activate;
	bool has_bundle_version;

	/* Descriptor strings, freed in the device-release callback. */
	char *vendor_string;
	char *product_string;
};
#define to_gb_control(d) container_of(d, struct gb_control, dev)

/* Lifecycle */
struct gb_control *gb_control_create(struct gb_interface *intf);
int gb_control_enable(struct gb_control *control);
void gb_control_disable(struct gb_control *control);
int gb_control_suspend(struct gb_control *control);
int gb_control_resume(struct gb_control *control);
int gb_control_add(struct gb_control *control);
void gb_control_del(struct gb_control *control);
struct gb_control *gb_control_get(struct gb_control *control);
void gb_control_put(struct gb_control *control);

/* Control-protocol operations */
int gb_control_get_bundle_versions(struct gb_control *control);
int gb_control_connected_operation(struct gb_control *control, u16 cport_id);
int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id);
int gb_control_disconnecting_operation(struct gb_control *control,
				       u16 cport_id);
int gb_control_mode_switch_operation(struct gb_control *control);
void gb_control_mode_switch_prepare(struct gb_control *control);
void gb_control_mode_switch_complete(struct gb_control *control);
int gb_control_get_manifest_size_operation(struct gb_interface *intf);
int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
				      size_t size);

/* Timesync */
int gb_control_timesync_enable(struct gb_control *control, u8 count,
			       u64 frame_time, u32 strobe_delay, u32 refclk);
int gb_control_timesync_disable(struct gb_control *control);
int gb_control_timesync_get_last_event(struct gb_control *control,
				       u64 *frame_time);
int gb_control_timesync_authoritative(struct gb_control *control,
				      u64 *frame_time);

/* Power management */
int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id);
int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id);
int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id);
int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id);
int gb_control_interface_suspend_prepare(struct gb_control *control);
int gb_control_interface_deactivate_prepare(struct gb_control *control);
int gb_control_interface_hibernate_abort(struct gb_control *control);

#endif /* __CONTROL_H */

View File

@ -0,0 +1,361 @@
/*
* Greybus "Core"
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define CREATE_TRACE_POINTS
#include "greybus.h"
#include "greybus_trace.h"
/* Runtime-PM autosuspend delay applied to bundle devices at probe time. */
#define GB_BUNDLE_AUTOSUSPEND_MS	3000

/* Allow greybus to be disabled at boot if needed */
static bool nogreybus;
#ifdef MODULE
module_param(nogreybus, bool, 0444);
#else
core_param(nogreybus, nogreybus, bool, 0444);
#endif

/* Nonzero when greybus was disabled via the nogreybus parameter. */
int greybus_disabled(void)
{
	return nogreybus;
}
EXPORT_SYMBOL_GPL(greybus_disabled);
static bool greybus_match_one_id(struct gb_bundle *bundle,
const struct greybus_bundle_id *id)
{
if ((id->match_flags & GREYBUS_ID_MATCH_VENDOR) &&
(id->vendor != bundle->intf->vendor_id))
return false;
if ((id->match_flags & GREYBUS_ID_MATCH_PRODUCT) &&
(id->product != bundle->intf->product_id))
return false;
if ((id->match_flags & GREYBUS_ID_MATCH_CLASS) &&
(id->class != bundle->class))
return false;
return true;
}
static const struct greybus_bundle_id *
greybus_match_id(struct gb_bundle *bundle, const struct greybus_bundle_id *id)
{
if (id == NULL)
return NULL;
for (; id->vendor || id->product || id->class || id->driver_info;
id++) {
if (greybus_match_one_id(bundle, id))
return id;
}
return NULL;
}
/* Bus match callback: only bundle devices can bind to greybus drivers. */
static int greybus_match_device(struct device *dev, struct device_driver *drv)
{
	struct greybus_driver *driver = to_greybus_driver(drv);
	struct gb_bundle *bundle;

	if (!is_gb_bundle(dev))
		return 0;

	bundle = to_gb_bundle(dev);

	/* FIXME - Dynamic ids? */
	return greybus_match_id(bundle, driver->id_table) ? 1 : 0;
}
/*
 * Compose the uevent environment for any greybus device type. The chain of
 * is_gb_*() checks identifies the device kind and fills in its containing
 * module/interface/bundle so BUS/MODULE/INTERFACE/BUNDLE variables can be
 * emitted as applicable.
 */
static int greybus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct gb_host_device *hd;
	struct gb_module *module = NULL;
	struct gb_interface *intf = NULL;
	struct gb_control *control = NULL;
	struct gb_bundle *bundle = NULL;
	struct gb_svc *svc = NULL;

	if (is_gb_host_device(dev)) {
		hd = to_gb_host_device(dev);
	} else if (is_gb_module(dev)) {
		module = to_gb_module(dev);
		hd = module->hd;
	} else if (is_gb_interface(dev)) {
		intf = to_gb_interface(dev);
		module = intf->module;
		hd = intf->hd;
	} else if (is_gb_control(dev)) {
		control = to_gb_control(dev);
		intf = control->intf;
		module = intf->module;
		hd = intf->hd;
	} else if (is_gb_bundle(dev)) {
		bundle = to_gb_bundle(dev);
		intf = bundle->intf;
		module = intf->module;
		hd = intf->hd;
	} else if (is_gb_svc(dev)) {
		svc = to_gb_svc(dev);
		hd = svc->hd;
	} else {
		dev_WARN(dev, "uevent for unknown greybus device \"type\"!\n");
		return -EINVAL;
	}

	/* Every greybus device belongs to a host device (bus). */
	if (add_uevent_var(env, "BUS=%u", hd->bus_id))
		return -ENOMEM;

	if (module) {
		if (add_uevent_var(env, "MODULE=%u", module->module_id))
			return -ENOMEM;
	}

	if (intf) {
		if (add_uevent_var(env, "INTERFACE=%u", intf->interface_id))
			return -ENOMEM;
		if (add_uevent_var(env, "GREYBUS_ID=%08x/%08x",
				   intf->vendor_id, intf->product_id))
			return -ENOMEM;
	}

	if (bundle) {
		// FIXME
		// add a uevent that can "load" a bundle type
		// This is what we need to bind a driver to so use the info
		// in gmod here as well
		if (add_uevent_var(env, "BUNDLE=%u", bundle->id))
			return -ENOMEM;
		if (add_uevent_var(env, "BUNDLE_CLASS=%02x", bundle->class))
			return -ENOMEM;
	}

	return 0;
}
/* Bus shutdown callback: quiesce host devices; other types need nothing. */
static void greybus_shutdown(struct device *dev)
{
	if (is_gb_host_device(dev)) {
		struct gb_host_device *hd;

		hd = to_gb_host_device(dev);
		gb_hd_shutdown(hd);
	}
}

struct bus_type greybus_bus_type = {
	.name = "greybus",
	.match = greybus_match_device,
	.uevent = greybus_uevent,
	.shutdown = greybus_shutdown,
};
/* Bus probe: activate the bundle, set up runtime PM, call the driver. */
static int greybus_probe(struct device *dev)
{
	struct greybus_driver *driver = to_greybus_driver(dev->driver);
	struct gb_bundle *bundle = to_gb_bundle(dev);
	const struct greybus_bundle_id *id;
	int retval;

	/* match id */
	id = greybus_match_id(bundle, driver->id_table);
	if (!id)
		return -ENODEV;

	/* Keep the interface resumed for the duration of the probe. */
	retval = pm_runtime_get_sync(&bundle->intf->dev);
	if (retval < 0) {
		pm_runtime_put_noidle(&bundle->intf->dev);
		return retval;
	}

	retval = gb_control_bundle_activate(bundle->intf->control, bundle->id);
	if (retval) {
		pm_runtime_put(&bundle->intf->dev);
		return retval;
	}

	/*
	 * Unbound bundle devices are always deactivated. During probe, the
	 * Runtime PM is set to enabled and active and the usage count is
	 * incremented. If the driver supports runtime PM, it should call
	 * pm_runtime_put() in its probe routine and pm_runtime_get_sync()
	 * in remove routine.
	 */
	pm_runtime_set_autosuspend_delay(dev, GB_BUNDLE_AUTOSUSPEND_MS);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	retval = driver->probe(bundle, id);
	if (retval) {
		/*
		 * Catch buggy drivers that fail to destroy their connections.
		 */
		WARN_ON(!list_empty(&bundle->connections));

		gb_control_bundle_deactivate(bundle->intf->control, bundle->id);

		/* Unwind the runtime-PM setup performed above. */
		pm_runtime_disable(dev);
		pm_runtime_set_suspended(dev);
		pm_runtime_put_noidle(dev);
		pm_runtime_dont_use_autosuspend(dev);
		pm_runtime_put(&bundle->intf->dev);

		return retval;
	}

	gb_timesync_schedule_synchronous(bundle->intf);

	pm_runtime_put(&bundle->intf->dev);

	return 0;
}
/* Bus remove: quiesce connections, call the driver, undo probe's PM setup. */
static int greybus_remove(struct device *dev)
{
	struct greybus_driver *driver = to_greybus_driver(dev->driver);
	struct gb_bundle *bundle = to_gb_bundle(dev);
	struct gb_connection *connection;
	int retval;

	/* Best-effort resume; continue with removal even on failure. */
	retval = pm_runtime_get_sync(dev);
	if (retval < 0)
		dev_err(dev, "failed to resume bundle: %d\n", retval);

	/*
	 * Disable (non-offloaded) connections early in case the interface is
	 * already gone to avoid unceccessary operation timeouts during
	 * driver disconnect. Otherwise, only disable incoming requests.
	 */
	list_for_each_entry(connection, &bundle->connections, bundle_links) {
		if (gb_connection_is_offloaded(connection))
			continue;

		if (bundle->intf->disconnected)
			gb_connection_disable_forced(connection);
		else
			gb_connection_disable_rx(connection);
	}

	driver->disconnect(bundle);

	/* Catch buggy drivers that fail to destroy their connections. */
	WARN_ON(!list_empty(&bundle->connections));

	if (!bundle->intf->disconnected)
		gb_control_bundle_deactivate(bundle->intf->control, bundle->id);

	/* This put balances the pm_runtime_get_sync() above... */
	pm_runtime_put_noidle(dev);
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
	pm_runtime_dont_use_autosuspend(dev);
	/* ...and this one the pm_runtime_get_noresume() from greybus_probe(). */
	pm_runtime_put_noidle(dev);

	return 0;
}
/*
 * Register a greybus bundle driver with the driver core.
 *
 * Fills in the embedded struct device_driver with the greybus bus type
 * and the common probe/remove callbacks before handing it to
 * driver_register(). Returns 0 on success or a negative errno.
 */
int greybus_register_driver(struct greybus_driver *driver, struct module *owner,
		const char *mod_name)
{
	int ret;

	if (greybus_disabled())
		return -ENODEV;

	driver->driver.bus = &greybus_bus_type;
	driver->driver.probe = greybus_probe;
	driver->driver.remove = greybus_remove;
	driver->driver.name = driver->name;
	driver->driver.owner = owner;
	driver->driver.mod_name = mod_name;

	ret = driver_register(&driver->driver);
	if (!ret)
		pr_info("registered new driver %s\n", driver->name);

	return ret;
}
EXPORT_SYMBOL_GPL(greybus_register_driver);
/*
 * Unregister a bundle driver previously registered with
 * greybus_register_driver().
 */
void greybus_deregister_driver(struct greybus_driver *driver)
{
	driver_unregister(&driver->driver);
}
EXPORT_SYMBOL_GPL(greybus_deregister_driver);
/*
 * Module init: register the greybus bus type and bring up the core
 * subsystems (host device, operation, timesync) in order, unwinding in
 * reverse on any failure.
 */
static int __init gb_init(void)
{
	int retval;

	if (greybus_disabled())
		return -ENODEV;

	/* A valid CPort id must never alias the "bad id" sentinel. */
	BUILD_BUG_ON(CPORT_ID_MAX >= (long)CPORT_ID_BAD);

	gb_debugfs_init();

	retval = bus_register(&greybus_bus_type);
	if (retval) {
		pr_err("bus_register failed (%d)\n", retval);
		goto error_bus;
	}

	retval = gb_hd_init();
	if (retval) {
		pr_err("gb_hd_init failed (%d)\n", retval);
		goto error_hd;
	}

	retval = gb_operation_init();
	if (retval) {
		pr_err("gb_operation_init failed (%d)\n", retval);
		goto error_operation;
	}

	retval = gb_timesync_init();
	if (retval) {
		pr_err("gb_timesync_init failed\n");
		goto error_timesync;
	}
	return 0;	/* Success */

	/* Error unwind, in reverse order of initialization. */
error_timesync:
	gb_operation_exit();
error_operation:
	gb_hd_exit();
error_hd:
	bus_unregister(&greybus_bus_type);
error_bus:
	gb_debugfs_cleanup();

	return retval;
}
module_init(gb_init);
/* Module exit: tear everything down in reverse order of gb_init(). */
static void __exit gb_exit(void)
{
	gb_timesync_exit();
	gb_operation_exit();
	gb_hd_exit();
	bus_unregister(&greybus_bus_type);
	gb_debugfs_cleanup();
	/* Wait for in-flight tracepoint probes before the module text goes away. */
	tracepoint_synchronize_unregister();
}
module_exit(gb_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");

View File

@ -0,0 +1,31 @@
/*
* Greybus debugfs code
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/debugfs.h>
#include "greybus.h"
/* Root of the greybus debugfs tree; shared via gb_debugfs_get(). */
static struct dentry *gb_debug_root;

/* Create the "greybus" debugfs root directory for the whole subsystem. */
void __init gb_debugfs_init(void)
{
	gb_debug_root = debugfs_create_dir("greybus", NULL);
}
/* Remove the whole debugfs tree and clear the root pointer. */
void gb_debugfs_cleanup(void)
{
	debugfs_remove_recursive(gb_debug_root);
	gb_debug_root = NULL;	/* guard against stale use after cleanup */
}
/* Return the shared debugfs root so other greybus modules can add entries. */
struct dentry *gb_debugfs_get(void)
{
	return gb_debug_root;
}
EXPORT_SYMBOL_GPL(gb_debugfs_get);

View File

@ -0,0 +1,11 @@
T: Bus=01 Lev=03 Prnt=07 Port=02 Cnt=03 Dev#= 12 Spd=12 MxCh= 0
D: Ver= 2.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs= 1
P: Vendor=ffff ProdID=0001 Rev= 1.00
S: Manufacturer=Greybus
S: Product=SVC Bridge
S: SerialNumber=12239
C:* #Ifs= 1 Cfg#= 1 Atr=c0 MxPwr=100mA
I:* If#= 0 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=es1_ap_driver
E: Ad=81(I) Atr=03(Int.) MxPS= 64 Ivl=64ms
E: Ad=82(I) Atr=02(Bulk) MxPS= 64 Ivl=0ms
E: Ad=02(O) Atr=02(Bulk) MxPS= 64 Ivl=0ms

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,42 @@
/*
* Greybus Firmware Management Header
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#ifndef __FIRMWARE_H
#define __FIRMWARE_H

#include "greybus.h"

/* Prefix prepended to every firmware blob name requested from disk. */
#define FW_NAME_PREFIX "gmp_"

/*
 * Length of the string in format: "FW_NAME_PREFIX""%08x_%08x_%08x_%08x_%s.tftf"
 * (3 + 1 + 4 * (8 + 1) + 10 + 1 + 4 + 1)
 */
#define FW_NAME_SIZE 56

/* Firmware Management Protocol specific functions */
int fw_mgmt_init(void);
void fw_mgmt_exit(void);
struct gb_connection *to_fw_mgmt_connection(struct device *dev);
int gb_fw_mgmt_request_handler(struct gb_operation *op);
int gb_fw_mgmt_connection_init(struct gb_connection *connection);
void gb_fw_mgmt_connection_exit(struct gb_connection *connection);

/* Firmware Download Protocol specific functions */
int gb_fw_download_request_handler(struct gb_operation *op);
int gb_fw_download_connection_init(struct gb_connection *connection);
void gb_fw_download_connection_exit(struct gb_connection *connection);

/* CAP Protocol specific functions */
int cap_init(void);
void cap_exit(void);
int gb_cap_connection_init(struct gb_connection *connection);
void gb_cap_connection_exit(struct gb_connection *connection);

#endif /* __FIRMWARE_H */

View File

@ -0,0 +1,331 @@
/*
* Greybus Firmware Core Bundle Driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/firmware.h>
#include "firmware.h"
#include "greybus.h"
#include "spilib.h"
struct gb_fw_core {
struct gb_connection *download_connection;
struct gb_connection *mgmt_connection;
struct gb_connection *spi_connection;
struct gb_connection *cap_connection;
};
#ifndef SPI_CORE_SUPPORT_PM
/*
 * When the SPI core cannot runtime-manage the controller itself, bridge its
 * prepare/unprepare hooks to the bundle's runtime-PM helpers so the bundle
 * stays resumed for the duration of a transfer.
 */
static int fw_spi_prepare_transfer_hardware(struct device *dev)
{
	return gb_pm_runtime_get_sync(to_gb_bundle(dev));
}

static void fw_spi_unprepare_transfer_hardware(struct device *dev)
{
	gb_pm_runtime_put_autosuspend(to_gb_bundle(dev));
}

static struct spilib_ops __spilib_ops = {
	.prepare_transfer_hardware = fw_spi_prepare_transfer_hardware,
	.unprepare_transfer_hardware = fw_spi_unprepare_transfer_hardware,
};

static struct spilib_ops *spilib_ops = &__spilib_ops;
#else
/* The SPI core handles runtime PM itself; no bridging ops are needed. */
static struct spilib_ops *spilib_ops = NULL;
#endif
/* Map a device carrying gb_fw_core drvdata back to its management connection. */
struct gb_connection *to_fw_mgmt_connection(struct device *dev)
{
	struct gb_fw_core *fw_core = dev_get_drvdata(dev);

	return fw_core->mgmt_connection;
}
/*
 * Enable the optional SPI connection and register a SPI master on top of
 * it. A NULL connection (no SPI CPort) is allowed and returns success.
 */
static int gb_fw_spi_connection_init(struct gb_connection *connection)
{
	int ret;

	if (!connection)
		return 0;

	ret = gb_connection_enable(connection);
	if (ret)
		return ret;

	ret = gb_spilib_master_init(connection, &connection->bundle->dev,
				    spilib_ops);
	if (ret)
		goto err_disable;

	return 0;

err_disable:
	gb_connection_disable(connection);

	return ret;
}
/* Tear down the SPI master and disable the connection; NULL is a no-op. */
static void gb_fw_spi_connection_exit(struct gb_connection *connection)
{
	if (!connection)
		return;

	gb_spilib_master_exit(connection);
	gb_connection_disable(connection);
}
/*
 * Probe a firmware-management bundle: walk its CPort descriptors, create a
 * connection per supported protocol, then initialize each sub-protocol.
 *
 * Only the Firmware Management CPort is mandatory. Failures to create or
 * initialize the download, SPI or CAP connections are logged and those
 * connections dropped, since the Interface can still be useful without them.
 */
static int gb_fw_core_probe(struct gb_bundle *bundle,
			    const struct greybus_bundle_id *id)
{
	struct greybus_descriptor_cport *cport_desc;
	struct gb_connection *connection;
	struct gb_fw_core *fw_core;
	int ret, i;
	u16 cport_id;
	u8 protocol_id;

	fw_core = kzalloc(sizeof(*fw_core), GFP_KERNEL);
	if (!fw_core)
		return -ENOMEM;

	/* Parse CPorts and create connections */
	for (i = 0; i < bundle->num_cports; i++) {
		cport_desc = &bundle->cport_desc[i];
		cport_id = le16_to_cpu(cport_desc->id);
		protocol_id = cport_desc->protocol_id;

		switch (protocol_id) {
		case GREYBUS_PROTOCOL_FW_MANAGEMENT:
			/* Disallow multiple Firmware Management CPorts */
			if (fw_core->mgmt_connection) {
				dev_err(&bundle->dev,
					"multiple management CPorts found\n");
				ret = -EINVAL;
				goto err_destroy_connections;
			}

			connection = gb_connection_create(bundle, cport_id,
						gb_fw_mgmt_request_handler);
			if (IS_ERR(connection)) {
				ret = PTR_ERR(connection);
				dev_err(&bundle->dev,
					"failed to create management connection (%d)\n",
					ret);
				goto err_destroy_connections;
			}

			fw_core->mgmt_connection = connection;
			break;
		case GREYBUS_PROTOCOL_FW_DOWNLOAD:
			/* Disallow multiple Firmware Download CPorts */
			if (fw_core->download_connection) {
				dev_err(&bundle->dev,
					"multiple download CPorts found\n");
				ret = -EINVAL;
				goto err_destroy_connections;
			}

			/* Creation failure is non-fatal for this protocol. */
			connection = gb_connection_create(bundle, cport_id,
						gb_fw_download_request_handler);
			if (IS_ERR(connection)) {
				dev_err(&bundle->dev, "failed to create download connection (%ld)\n",
					PTR_ERR(connection));
			} else {
				fw_core->download_connection = connection;
			}

			break;
		case GREYBUS_PROTOCOL_SPI:
			/* Disallow multiple SPI CPorts */
			if (fw_core->spi_connection) {
				dev_err(&bundle->dev,
					"multiple SPI CPorts found\n");
				ret = -EINVAL;
				goto err_destroy_connections;
			}

			/* Creation failure is non-fatal for this protocol. */
			connection = gb_connection_create(bundle, cport_id,
							  NULL);
			if (IS_ERR(connection)) {
				dev_err(&bundle->dev, "failed to create SPI connection (%ld)\n",
					PTR_ERR(connection));
			} else {
				fw_core->spi_connection = connection;
			}

			break;
		case GREYBUS_PROTOCOL_AUTHENTICATION:
			/* Disallow multiple CAP CPorts */
			if (fw_core->cap_connection) {
				dev_err(&bundle->dev, "multiple Authentication CPorts found\n");
				ret = -EINVAL;
				goto err_destroy_connections;
			}

			/* Creation failure is non-fatal for this protocol. */
			connection = gb_connection_create(bundle, cport_id,
							  NULL);
			if (IS_ERR(connection)) {
				dev_err(&bundle->dev, "failed to create Authentication connection (%ld)\n",
					PTR_ERR(connection));
			} else {
				fw_core->cap_connection = connection;
			}

			break;
		default:
			dev_err(&bundle->dev, "invalid protocol id (0x%02x)\n",
				protocol_id);
			ret = -EINVAL;
			goto err_destroy_connections;
		}
	}

	/* Firmware Management connection is mandatory */
	if (!fw_core->mgmt_connection) {
		dev_err(&bundle->dev, "missing management connection\n");
		ret = -ENODEV;
		goto err_destroy_connections;
	}

	ret = gb_fw_download_connection_init(fw_core->download_connection);
	if (ret) {
		/* We may still be able to work with the Interface */
		dev_err(&bundle->dev, "failed to initialize firmware download connection, disable it (%d)\n",
			ret);
		gb_connection_destroy(fw_core->download_connection);
		fw_core->download_connection = NULL;
	}

	ret = gb_fw_spi_connection_init(fw_core->spi_connection);
	if (ret) {
		/* We may still be able to work with the Interface */
		dev_err(&bundle->dev, "failed to initialize SPI connection, disable it (%d)\n",
			ret);
		gb_connection_destroy(fw_core->spi_connection);
		fw_core->spi_connection = NULL;
	}

	ret = gb_cap_connection_init(fw_core->cap_connection);
	if (ret) {
		/* We may still be able to work with the Interface */
		dev_err(&bundle->dev, "failed to initialize CAP connection, disable it (%d)\n",
			ret);
		gb_connection_destroy(fw_core->cap_connection);
		fw_core->cap_connection = NULL;
	}

	ret = gb_fw_mgmt_connection_init(fw_core->mgmt_connection);
	if (ret) {
		/* We may still be able to work with the Interface */
		dev_err(&bundle->dev, "failed to initialize firmware management connection, disable it (%d)\n",
			ret);
		goto err_exit_connections;
	}

	greybus_set_drvdata(bundle, fw_core);

	/* FIXME: Remove this after S2 Loader gets runtime PM support */
	if (!(bundle->intf->quirks & GB_INTERFACE_QUIRK_NO_PM))
		gb_pm_runtime_put_autosuspend(bundle);

	return 0;

err_exit_connections:
	gb_cap_connection_exit(fw_core->cap_connection);
	gb_fw_spi_connection_exit(fw_core->spi_connection);
	gb_fw_download_connection_exit(fw_core->download_connection);
err_destroy_connections:
	/*
	 * Some connections may be NULL here; the exit helpers check for that
	 * explicitly, and gb_connection_destroy() is presumably NULL-safe too
	 * (it is called unconditionally on all four pointers).
	 */
	gb_connection_destroy(fw_core->mgmt_connection);
	gb_connection_destroy(fw_core->cap_connection);
	gb_connection_destroy(fw_core->spi_connection);
	gb_connection_destroy(fw_core->download_connection);
	kfree(fw_core);

	return ret;
}
/*
 * Driver disconnect: resume the bundle (when PM is supported), exit every
 * sub-protocol, destroy all connections and free the core state.
 */
static void gb_fw_core_disconnect(struct gb_bundle *bundle)
{
	struct gb_fw_core *fw_core = greybus_get_drvdata(bundle);
	int ret;

	/* FIXME: Remove this after S2 Loader gets runtime PM support */
	if (!(bundle->intf->quirks & GB_INTERFACE_QUIRK_NO_PM)) {
		ret = gb_pm_runtime_get_sync(bundle);
		if (ret)
			gb_pm_runtime_get_noresume(bundle);
	}

	gb_fw_mgmt_connection_exit(fw_core->mgmt_connection);
	gb_cap_connection_exit(fw_core->cap_connection);
	gb_fw_spi_connection_exit(fw_core->spi_connection);
	gb_fw_download_connection_exit(fw_core->download_connection);

	gb_connection_destroy(fw_core->mgmt_connection);
	gb_connection_destroy(fw_core->cap_connection);
	gb_connection_destroy(fw_core->spi_connection);
	gb_connection_destroy(fw_core->download_connection);

	kfree(fw_core);
}
/* Bind to any bundle whose class is firmware-management. */
static const struct greybus_bundle_id gb_fw_core_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_FW_MANAGEMENT) },
	{ }	/* terminator */
};

static struct greybus_driver gb_fw_core_driver = {
	.name = "gb-firmware",
	.probe = gb_fw_core_probe,
	.disconnect = gb_fw_core_disconnect,
	.id_table = gb_fw_core_id_table,
};
static int fw_core_init(void)
{
int ret;
ret = fw_mgmt_init();
if (ret) {
pr_err("Failed to initialize fw-mgmt core (%d)\n", ret);
return ret;
}
ret = cap_init();
if (ret) {
pr_err("Failed to initialize component authentication core (%d)\n",
ret);
goto fw_mgmt_exit;
}
ret = greybus_register(&gb_fw_core_driver);
if (ret)
goto cap_exit;
return 0;
cap_exit:
cap_exit();
fw_mgmt_exit:
fw_mgmt_exit();
return ret;
}
module_init(fw_core_init);
/*
 * Module exit: unregister the driver, then tear down the cores in reverse
 * order of fw_core_init().
 */
static void __exit fw_core_exit(void)
{
	greybus_deregister(&gb_fw_core_driver);
	cap_exit();
	fw_mgmt_exit();
}
module_exit(fw_core_exit);
MODULE_ALIAS("greybus:firmware");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_DESCRIPTION("Greybus Firmware Bundle Driver");
MODULE_LICENSE("GPL v2");

View File

@ -0,0 +1,465 @@
/*
* Greybus Firmware Download Protocol Driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/firmware.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include "firmware.h"
#include "greybus.h"
/* Estimated minimum buffer size, actual size can be smaller than this */
#define MIN_FETCH_SIZE 512

/* Timeout, in jiffies, within which fetch or release firmware must be called */
#define NEXT_REQ_TIMEOUT_J msecs_to_jiffies(1000)

/* State for one firmware blob currently being served to the module. */
struct fw_request {
	u8 firmware_id;			/* id reported to the module (1..255) */
	bool disabled;			/* unlinked from the request list */
	bool timedout;			/* no fetch/release arrived in time */
	char name[FW_NAME_SIZE];	/* name passed to request_firmware() */
	const struct firmware *fw;	/* the loaded firmware image */
	struct list_head node;		/* entry in fw_download->fw_requests */
	struct delayed_work dwork;	/* per-request timeout handler */
	/* Timeout, in jiffies, within which the firmware shall download */
	unsigned long release_timeout_j;
	struct kref kref;
	struct fw_download *fw_download;	/* owning connection state */
};

/* Per-connection state for the firmware-download protocol. */
struct fw_download {
	struct device *parent;		/* bundle device, used for logging */
	struct gb_connection *connection;
	struct list_head fw_requests;	/* outstanding fw_request objects */
	struct ida id_map;		/* allocator for firmware ids */
	struct mutex mutex;		/* protects the fw_requests list */
};
/*
 * kref release handler: frees the firmware image and, unless the request
 * timed out, returns its id to the allocator. Runs once the last reference
 * is dropped via put_fw_req().
 */
static void fw_req_release(struct kref *kref)
{
	struct fw_request *fw_req = container_of(kref, struct fw_request, kref);

	dev_dbg(fw_req->fw_download->parent, "firmware %s released\n",
		fw_req->name);

	release_firmware(fw_req->fw);

	/*
	 * The request timed out and the module may send a fetch-fw or
	 * release-fw request later. Lets block the id we allocated for this
	 * request, so that the AP doesn't refer to a later fw-request (with
	 * same firmware_id) for the old timedout fw-request.
	 *
	 * NOTE:
	 *
	 * This also means that after 255 timeouts we will fail to service new
	 * firmware downloads. But what else can we do in that case anyway? Lets
	 * just hope that it never happens.
	 */
	if (!fw_req->timedout)
		ida_simple_remove(&fw_req->fw_download->id_map,
				  fw_req->firmware_id);

	kfree(fw_req);
}
/*
 * Incoming requests are serialized for a connection, and the only race possible
 * is between the timeout handler freeing this and an incoming request.
 *
 * The operations on the fw-request list are protected by the mutex and
 * get_fw_req() increments the reference count before returning a fw_req pointer
 * to the users.
 *
 * free_firmware() also takes the mutex while removing an entry from the list,
 * it guarantees that every user of fw_req has taken a kref-reference by now and
 * we wouldn't have any new users.
 *
 * Once the last user drops the reference, the fw_req structure is freed.
 */

/* Drop one reference; the request is freed via fw_req_release() on last put. */
static void put_fw_req(struct fw_request *fw_req)
{
	kref_put(&fw_req->kref, fw_req_release);
}
/*
 * Look up the request with @firmware_id on the download list, taking a
 * reference on it. Returns NULL when no such request exists. The caller
 * must balance a successful lookup with put_fw_req().
 */
static struct fw_request *get_fw_req(struct fw_download *fw_download,
				     u8 firmware_id)
{
	struct fw_request *fw_req, *found = NULL;

	mutex_lock(&fw_download->mutex);

	list_for_each_entry(fw_req, &fw_download->fw_requests, node) {
		if (fw_req->firmware_id != firmware_id)
			continue;

		kref_get(&fw_req->kref);
		found = fw_req;
		break;
	}

	mutex_unlock(&fw_download->mutex);

	return found;
}
/*
 * Unlink a request from the download list and drop the list's reference.
 * The disabled flag makes this idempotent, since the timeout handler may
 * race with an explicit release from the module.
 */
static void free_firmware(struct fw_download *fw_download,
			  struct fw_request *fw_req)
{
	/* Already disabled from timeout handlers */
	if (fw_req->disabled)
		return;

	mutex_lock(&fw_download->mutex);
	list_del(&fw_req->node);
	mutex_unlock(&fw_download->mutex);

	fw_req->disabled = true;

	put_fw_req(fw_req);
}
/*
 * Delayed-work handler: the module failed to send the next fetch/release
 * request in time. Mark the request timed out and release it; the id stays
 * allocated (see fw_req_release()) so it cannot be handed out again.
 */
static void fw_request_timedout(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct fw_request *fw_req = container_of(dwork, struct fw_request, dwork);
	struct fw_download *fw_download = fw_req->fw_download;

	dev_err(fw_download->parent,
		"Timed out waiting for fetch / release firmware requests: %u\n",
		fw_req->firmware_id);

	fw_req->timedout = true;
	free_firmware(fw_download, fw_req);
}
/*
 * Enforce the overall download deadline computed in find_firmware().
 * Returns 0 while the deadline has not passed; otherwise marks the request
 * timed out, releases it and returns -ETIMEDOUT.
 */
static int exceeds_release_timeout(struct fw_request *fw_req)
{
	struct fw_download *fw_download = fw_req->fw_download;

	if (time_after_eq(jiffies, fw_req->release_timeout_j)) {
		dev_err(fw_download->parent,
			"Firmware download didn't finish in time, abort: %d\n",
			fw_req->firmware_id);

		fw_req->timedout = true;
		free_firmware(fw_download, fw_req);

		return -ETIMEDOUT;
	}

	return 0;
}
/*
 * Allocate a firmware id, build the blob name from the Interface's identity
 * and @tag, and load the image from disk via request_firmware(). On success
 * the new request is queued on the download list and its per-request
 * timeout armed; returns the request or an ERR_PTR.
 */
static struct fw_request *find_firmware(struct fw_download *fw_download,
					const char *tag)
{
	struct gb_interface *intf = fw_download->connection->bundle->intf;
	struct fw_request *fw_req;
	int ret, req_count;

	fw_req = kzalloc(sizeof(*fw_req), GFP_KERNEL);
	if (!fw_req)
		return ERR_PTR(-ENOMEM);

	/* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
	ret = ida_simple_get(&fw_download->id_map, 1, 256, GFP_KERNEL);
	if (ret < 0) {
		dev_err(fw_download->parent,
			"failed to allocate firmware id (%d)\n", ret);
		goto err_free_req;
	}
	fw_req->firmware_id = ret;

	snprintf(fw_req->name, sizeof(fw_req->name),
		 FW_NAME_PREFIX "%08x_%08x_%08x_%08x_%s.tftf",
		 intf->ddbl1_manufacturer_id, intf->ddbl1_product_id,
		 intf->vendor_id, intf->product_id, tag);

	dev_info(fw_download->parent, "Requested firmware package '%s'\n",
		 fw_req->name);

	ret = request_firmware(&fw_req->fw, fw_req->name, fw_download->parent);
	if (ret) {
		dev_err(fw_download->parent,
			"firmware request failed for %s (%d)\n", fw_req->name,
			ret);
		goto err_free_id;
	}

	fw_req->fw_download = fw_download;
	kref_init(&fw_req->kref);

	mutex_lock(&fw_download->mutex);
	list_add(&fw_req->node, &fw_download->fw_requests);
	mutex_unlock(&fw_download->mutex);

	/* Timeout, in jiffies, within which firmware should get loaded */
	req_count = DIV_ROUND_UP(fw_req->fw->size, MIN_FETCH_SIZE);
	fw_req->release_timeout_j = jiffies + req_count * NEXT_REQ_TIMEOUT_J;

	INIT_DELAYED_WORK(&fw_req->dwork, fw_request_timedout);
	schedule_delayed_work(&fw_req->dwork, NEXT_REQ_TIMEOUT_J);

	return fw_req;

err_free_id:
	ida_simple_remove(&fw_download->id_map, fw_req->firmware_id);
err_free_req:
	kfree(fw_req);

	return ERR_PTR(ret);
}
/*
 * Handle a find-firmware request from the module: validate the tag, load
 * the matching blob and reply with its assigned id and total size.
 */
static int fw_download_find_firmware(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct fw_download *fw_download = gb_connection_get_data(connection);
	struct gb_fw_download_find_firmware_request *request;
	struct gb_fw_download_find_firmware_response *response;
	struct fw_request *fw_req;
	const char *tag;

	if (op->request->payload_size != sizeof(*request)) {
		dev_err(fw_download->parent,
			"illegal size of find firmware request (%zu != %zu)\n",
			op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;
	tag = (const char *)request->firmware_tag;

	/* firmware_tag must be null-terminated */
	if (strnlen(tag, GB_FIRMWARE_TAG_MAX_SIZE) == GB_FIRMWARE_TAG_MAX_SIZE) {
		dev_err(fw_download->parent,
			"firmware-tag is not null-terminated\n");
		return -EINVAL;
	}

	fw_req = find_firmware(fw_download, tag);
	if (IS_ERR(fw_req))
		return PTR_ERR(fw_req);

	if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL)) {
		dev_err(fw_download->parent, "error allocating response\n");
		/* Undo find_firmware(); the request can't be served now. */
		free_firmware(fw_download, fw_req);
		return -ENOMEM;
	}

	response = op->response->payload;
	response->firmware_id = fw_req->firmware_id;
	response->size = cpu_to_le32(fw_req->fw->size);

	dev_dbg(fw_download->parent,
		"firmware size is %zu bytes\n", fw_req->fw->size);

	return 0;
}
/*
 * Handle a fetch-firmware request: copy the requested window of the blob
 * into the response. Each successful fetch re-arms the per-request timeout,
 * and the whole download must complete before release_timeout_j.
 */
static int fw_download_fetch_firmware(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct fw_download *fw_download = gb_connection_get_data(connection);
	struct gb_fw_download_fetch_firmware_request *request;
	struct gb_fw_download_fetch_firmware_response *response;
	struct fw_request *fw_req;
	const struct firmware *fw;
	unsigned int offset, size;
	u8 firmware_id;
	int ret = 0;

	if (op->request->payload_size != sizeof(*request)) {
		dev_err(fw_download->parent,
			"Illegal size of fetch firmware request (%zu %zu)\n",
			op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;
	offset = le32_to_cpu(request->offset);
	size = le32_to_cpu(request->size);
	firmware_id = request->firmware_id;

	fw_req = get_fw_req(fw_download, firmware_id);
	if (!fw_req) {
		dev_err(fw_download->parent,
			"firmware not available for id: %02u\n", firmware_id);
		return -EINVAL;
	}

	/* Make sure work handler isn't running in parallel */
	cancel_delayed_work_sync(&fw_req->dwork);

	/* We timed-out before reaching here ? */
	if (fw_req->disabled) {
		ret = -ETIMEDOUT;
		goto put_fw;
	}

	/*
	 * Firmware download must finish within a limited time interval. If it
	 * doesn't, then we might have a buggy Module on the other side. Abort
	 * download.
	 */
	ret = exceeds_release_timeout(fw_req);
	if (ret)
		goto put_fw;

	fw = fw_req->fw;

	/* Reject windows outside the blob; the check is overflow-safe. */
	if (offset >= fw->size || size > fw->size - offset) {
		dev_err(fw_download->parent,
			"bad fetch firmware request (offs = %u, size = %u)\n",
			offset, size);
		ret = -EINVAL;
		goto put_fw;
	}

	if (!gb_operation_response_alloc(op, sizeof(*response) + size,
					 GFP_KERNEL)) {
		dev_err(fw_download->parent,
			"error allocating fetch firmware response\n");
		ret = -ENOMEM;
		goto put_fw;
	}

	response = op->response->payload;
	memcpy(response->data, fw->data + offset, size);

	dev_dbg(fw_download->parent,
		"responding with firmware (offs = %u, size = %u)\n", offset,
		size);

	/* Refresh timeout */
	schedule_delayed_work(&fw_req->dwork, NEXT_REQ_TIMEOUT_J);

put_fw:
	put_fw_req(fw_req);

	return ret;
}
/*
 * Handle a release-firmware request: cancel the pending timeout and drop
 * the request's resources now that the module is done with the blob.
 */
static int fw_download_release_firmware(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct fw_download *fw_download = gb_connection_get_data(connection);
	struct gb_fw_download_release_firmware_request *request;
	struct fw_request *fw_req;
	u8 firmware_id;

	if (op->request->payload_size != sizeof(*request)) {
		dev_err(fw_download->parent,
			"Illegal size of release firmware request (%zu %zu)\n",
			op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;
	firmware_id = request->firmware_id;

	fw_req = get_fw_req(fw_download, firmware_id);
	if (!fw_req) {
		dev_err(fw_download->parent,
			"firmware not available for id: %02u\n", firmware_id);
		return -EINVAL;
	}

	cancel_delayed_work_sync(&fw_req->dwork);

	free_firmware(fw_download, fw_req);
	/* Balances the reference taken by get_fw_req() above. */
	put_fw_req(fw_req);

	dev_dbg(fw_download->parent, "release firmware\n");

	return 0;
}
/*
 * Demultiplex incoming firmware-download requests from the module to the
 * matching operation handler. Unknown types are logged and rejected.
 */
int gb_fw_download_request_handler(struct gb_operation *op)
{
	switch (op->type) {
	case GB_FW_DOWNLOAD_TYPE_FIND_FIRMWARE:
		return fw_download_find_firmware(op);
	case GB_FW_DOWNLOAD_TYPE_FETCH_FIRMWARE:
		return fw_download_fetch_firmware(op);
	case GB_FW_DOWNLOAD_TYPE_RELEASE_FIRMWARE:
		return fw_download_release_firmware(op);
	default:
		dev_err(&op->connection->bundle->dev,
			"unsupported request: %u\n", op->type);
		return -EINVAL;
	}
}
/*
 * Set up firmware-download state for @connection and enable it.
 * A NULL connection (no download CPort) is allowed and returns 0.
 */
int gb_fw_download_connection_init(struct gb_connection *connection)
{
	struct fw_download *fw_download;
	int ret;

	if (!connection)
		return 0;

	fw_download = kzalloc(sizeof(*fw_download), GFP_KERNEL);
	if (!fw_download)
		return -ENOMEM;

	fw_download->parent = &connection->bundle->dev;
	INIT_LIST_HEAD(&fw_download->fw_requests);
	ida_init(&fw_download->id_map);
	gb_connection_set_data(connection, fw_download);
	fw_download->connection = connection;
	mutex_init(&fw_download->mutex);

	ret = gb_connection_enable(connection);
	if (ret)
		goto err_destroy_id_map;

	return 0;

err_destroy_id_map:
	ida_destroy(&fw_download->id_map);
	kfree(fw_download);

	return ret;
}
/*
 * Disable the connection and release every outstanding firmware request.
 * Pending requests are pinned with an extra reference first so the timeout
 * handler cannot free them from under the cleanup loop.
 */
void gb_fw_download_connection_exit(struct gb_connection *connection)
{
	struct fw_download *fw_download;
	struct fw_request *fw_req, *tmp;

	if (!connection)
		return;

	fw_download = gb_connection_get_data(connection);
	gb_connection_disable(fw_download->connection);

	/*
	 * Make sure we have a reference to the pending requests, before they
	 * are freed from the timeout handler.
	 */
	mutex_lock(&fw_download->mutex);
	list_for_each_entry(fw_req, &fw_download->fw_requests, node)
		kref_get(&fw_req->kref);
	mutex_unlock(&fw_download->mutex);

	/* Release pending firmware packages */
	list_for_each_entry_safe(fw_req, tmp, &fw_download->fw_requests, node) {
		cancel_delayed_work_sync(&fw_req->dwork);
		free_firmware(fw_download, fw_req);
		put_fw_req(fw_req);
	}

	ida_destroy(&fw_download->id_map);
	kfree(fw_download);
}

View File

@ -0,0 +1,721 @@
/*
* Greybus Firmware Management Protocol Driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/cdev.h>
#include <linux/completion.h>
#include <linux/firmware.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include "firmware.h"
#include "greybus_firmware.h"
#include "greybus.h"
/* Timeout, in milliseconds, for firmware-management waits. */
#define FW_MGMT_TIMEOUT_MS 1000

/* Per-connection state for the firmware-management protocol. */
struct fw_mgmt {
	struct device *parent;		/* bundle device, used for logging */
	struct gb_connection *connection;
	struct kref kref;
	struct list_head node;		/* entry in the global fw_mgmt_list */

	/* Common id-map for interface and backend firmware requests */
	struct ida id_map;
	struct mutex mutex;
	/* Completed when a loaded/updated event arrives from the Interface. */
	struct completion completion;
	struct cdev cdev;
	struct device *class_device;
	dev_t dev_num;
	unsigned int timeout_jiffies;
	bool disabled; /* connection getting disabled */

	/* Interface Firmware specific fields */
	bool mode_switch_started;
	bool intf_fw_loaded;
	u8 intf_fw_request_id;		/* 0 means no request pending */
	u8 intf_fw_status;
	u16 intf_fw_major;
	u16 intf_fw_minor;

	/* Backend Firmware specific fields */
	u8 backend_fw_request_id;	/* 0 means no request pending */
	u8 backend_fw_status;
};
/*
* Number of minor devices this driver supports.
* There will be exactly one required per Interface.
*/
#define NUM_MINORS U8_MAX
static struct class *fw_mgmt_class;
static dev_t fw_mgmt_dev_num;
static DEFINE_IDA(fw_mgmt_minors_map);
static LIST_HEAD(fw_mgmt_list);
static DEFINE_MUTEX(list_mutex);
/* kref release handler: destroys the id map and frees the state on last put. */
static void fw_mgmt_kref_release(struct kref *kref)
{
	struct fw_mgmt *fw_mgmt = container_of(kref, struct fw_mgmt, kref);

	ida_destroy(&fw_mgmt->id_map);
	kfree(fw_mgmt);
}
/*
 * All users of fw_mgmt take a reference (from within list_mutex lock), before
 * they get a pointer to play with. And the structure will be freed only after
 * the last user has put the reference to it.
 */

/* Drop one reference; frees via fw_mgmt_kref_release() on the last put. */
static void put_fw_mgmt(struct fw_mgmt *fw_mgmt)
{
	kref_put(&fw_mgmt->kref, fw_mgmt_kref_release);
}
/* Caller must call put_fw_mgmt() after using struct fw_mgmt */
static struct fw_mgmt *get_fw_mgmt(struct cdev *cdev)
{
struct fw_mgmt *fw_mgmt;
mutex_lock(&list_mutex);
list_for_each_entry(fw_mgmt, &fw_mgmt_list, node) {
if (&fw_mgmt->cdev == cdev) {
kref_get(&fw_mgmt->kref);
goto unlock;
}
}
fw_mgmt = NULL;
unlock:
mutex_unlock(&list_mutex);
return fw_mgmt;
}
/*
 * Query the Interface for its running firmware version and tag, filling
 * @fw_info on success. Returns 0 or a negative errno.
 */
static int fw_mgmt_interface_fw_version_operation(struct fw_mgmt *fw_mgmt,
		struct fw_mgmt_ioc_get_intf_version *fw_info)
{
	struct gb_connection *connection = fw_mgmt->connection;
	struct gb_fw_mgmt_interface_fw_version_response response;
	int ret;

	ret = gb_operation_sync(connection,
				GB_FW_MGMT_TYPE_INTERFACE_FW_VERSION, NULL, 0,
				&response, sizeof(response));
	if (ret) {
		dev_err(fw_mgmt->parent,
			"failed to get interface firmware version (%d)\n", ret);
		return ret;
	}

	fw_info->major = le16_to_cpu(response.major);
	fw_info->minor = le16_to_cpu(response.minor);

	strncpy(fw_info->firmware_tag, response.firmware_tag,
		GB_FIRMWARE_TAG_MAX_SIZE);

	/*
	 * The firmware-tag should be NULL terminated, otherwise throw error but
	 * don't fail.
	 */
	if (fw_info->firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
		dev_err(fw_mgmt->parent,
			"fw-version: firmware-tag is not NULL terminated\n");
		/* Force-terminate rather than failing the whole query. */
		fw_info->firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] = '\0';
	}

	return 0;
}
/*
 * Ask the Interface to load and validate the firmware named by @tag using
 * @load_method. Allocates a request id that the later "loaded" event must
 * echo back; the id is released again if the request itself fails.
 */
static int fw_mgmt_load_and_validate_operation(struct fw_mgmt *fw_mgmt,
		u8 load_method, const char *tag)
{
	struct gb_fw_mgmt_load_and_validate_fw_request request;
	int ret;

	if (load_method != GB_FW_LOAD_METHOD_UNIPRO &&
	    load_method != GB_FW_LOAD_METHOD_INTERNAL) {
		dev_err(fw_mgmt->parent,
			"invalid load-method (%d)\n", load_method);
		return -EINVAL;
	}

	request.load_method = load_method;
	strncpy(request.firmware_tag, tag, GB_FIRMWARE_TAG_MAX_SIZE);

	/*
	 * The firmware-tag should be NULL terminated, otherwise throw error and
	 * fail.
	 */
	if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
		dev_err(fw_mgmt->parent, "load-and-validate: firmware-tag is not NULL terminated\n");
		return -EINVAL;
	}

	/* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
	ret = ida_simple_get(&fw_mgmt->id_map, 1, 256, GFP_KERNEL);
	if (ret < 0) {
		dev_err(fw_mgmt->parent, "failed to allocate request id (%d)\n",
			ret);
		return ret;
	}

	fw_mgmt->intf_fw_request_id = ret;
	fw_mgmt->intf_fw_loaded = false;
	request.request_id = ret;

	ret = gb_operation_sync(fw_mgmt->connection,
				GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW, &request,
				sizeof(request), NULL, 0);
	if (ret) {
		/* Undo the id allocation; no "loaded" event will arrive. */
		ida_simple_remove(&fw_mgmt->id_map,
				  fw_mgmt->intf_fw_request_id);
		fw_mgmt->intf_fw_request_id = 0;
		dev_err(fw_mgmt->parent,
			"load and validate firmware request failed (%d)\n",
			ret);
		return ret;
	}

	return 0;
}
/*
 * Handle the Interface's "firmware loaded" event: validate it against the
 * pending load-and-validate request, record status/version and wake up the
 * waiter via the completion.
 */
static int fw_mgmt_interface_fw_loaded_operation(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct fw_mgmt *fw_mgmt = gb_connection_get_data(connection);
	struct gb_fw_mgmt_loaded_fw_request *request;

	/* No pending load and validate request ? */
	if (!fw_mgmt->intf_fw_request_id) {
		dev_err(fw_mgmt->parent,
			"unexpected firmware loaded request received\n");
		return -ENODEV;
	}

	if (op->request->payload_size != sizeof(*request)) {
		dev_err(fw_mgmt->parent, "illegal size of firmware loaded request (%zu != %zu)\n",
			op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	/* Invalid request-id ? */
	if (request->request_id != fw_mgmt->intf_fw_request_id) {
		dev_err(fw_mgmt->parent, "invalid request id for firmware loaded request (%02u != %02u)\n",
			fw_mgmt->intf_fw_request_id, request->request_id);
		return -ENODEV;
	}

	/* The request is consumed; release its id and record the outcome. */
	ida_simple_remove(&fw_mgmt->id_map, fw_mgmt->intf_fw_request_id);
	fw_mgmt->intf_fw_request_id = 0;
	fw_mgmt->intf_fw_status = request->status;
	fw_mgmt->intf_fw_major = le16_to_cpu(request->major);
	fw_mgmt->intf_fw_minor = le16_to_cpu(request->minor);

	if (fw_mgmt->intf_fw_status == GB_FW_LOAD_STATUS_FAILED)
		dev_err(fw_mgmt->parent,
			"failed to load interface firmware, status:%02x\n",
			fw_mgmt->intf_fw_status);
	else if (fw_mgmt->intf_fw_status == GB_FW_LOAD_STATUS_VALIDATION_FAILED)
		dev_err(fw_mgmt->parent,
			"failed to validate interface firmware, status:%02x\n",
			fw_mgmt->intf_fw_status);
	else
		fw_mgmt->intf_fw_loaded = true;

	complete(&fw_mgmt->completion);

	return 0;
}
/*
 * Query the Interface for the version of a backend firmware identified by
 * @fw_info->firmware_tag. The version fields are only meaningful for the
 * SUCCESS status; they are zeroed for all other outcomes.
 */
static int fw_mgmt_backend_fw_version_operation(struct fw_mgmt *fw_mgmt,
		struct fw_mgmt_ioc_get_backend_version *fw_info)
{
	struct gb_connection *connection = fw_mgmt->connection;
	struct gb_fw_mgmt_backend_fw_version_request request;
	struct gb_fw_mgmt_backend_fw_version_response response;
	int ret;

	strncpy(request.firmware_tag, fw_info->firmware_tag,
		GB_FIRMWARE_TAG_MAX_SIZE);

	/*
	 * The firmware-tag should be NULL terminated, otherwise throw error and
	 * fail.
	 */
	if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
		dev_err(fw_mgmt->parent, "backend-version: firmware-tag is not NULL terminated\n");
		return -EINVAL;
	}

	ret = gb_operation_sync(connection,
				GB_FW_MGMT_TYPE_BACKEND_FW_VERSION, &request,
				sizeof(request), &response, sizeof(response));
	if (ret) {
		dev_err(fw_mgmt->parent, "failed to get version of %s backend firmware (%d)\n",
			fw_info->firmware_tag, ret);
		return ret;
	}

	fw_info->status = response.status;

	/* Reset version as that should be non-zero only for success case */
	fw_info->major = 0;
	fw_info->minor = 0;

	switch (fw_info->status) {
	case GB_FW_BACKEND_VERSION_STATUS_SUCCESS:
		fw_info->major = le16_to_cpu(response.major);
		fw_info->minor = le16_to_cpu(response.minor);
		break;
	case GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE:
	case GB_FW_BACKEND_VERSION_STATUS_RETRY:
		/* Status alone is enough; caller decides whether to retry. */
		break;
	case GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED:
		dev_err(fw_mgmt->parent,
			"Firmware with tag %s is not supported by Interface\n",
			fw_info->firmware_tag);
		break;
	default:
		dev_err(fw_mgmt->parent, "Invalid status received: %u\n",
			fw_info->status);
	}

	return 0;
}
/*
 * Ask the Interface to update the backend firmware identified by @tag.
 * Allocates a request id that the later "updated" event must echo back;
 * the id is released again if the request itself fails.
 */
static int fw_mgmt_backend_fw_update_operation(struct fw_mgmt *fw_mgmt,
		char *tag)
{
	struct gb_fw_mgmt_backend_fw_update_request request;
	int ret;

	strncpy(request.firmware_tag, tag, GB_FIRMWARE_TAG_MAX_SIZE);

	/*
	 * The firmware-tag should be NULL terminated, otherwise throw error and
	 * fail.
	 */
	if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
		dev_err(fw_mgmt->parent, "backend-update: firmware-tag is not NULL terminated\n");
		return -EINVAL;
	}

	/* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
	ret = ida_simple_get(&fw_mgmt->id_map, 1, 256, GFP_KERNEL);
	if (ret < 0) {
		dev_err(fw_mgmt->parent, "failed to allocate request id (%d)\n",
			ret);
		return ret;
	}

	fw_mgmt->backend_fw_request_id = ret;
	request.request_id = ret;

	ret = gb_operation_sync(fw_mgmt->connection,
				GB_FW_MGMT_TYPE_BACKEND_FW_UPDATE, &request,
				sizeof(request), NULL, 0);
	if (ret) {
		/* Undo the id allocation; no "updated" event will arrive. */
		ida_simple_remove(&fw_mgmt->id_map,
				  fw_mgmt->backend_fw_request_id);
		fw_mgmt->backend_fw_request_id = 0;
		dev_err(fw_mgmt->parent,
			"backend %s firmware update request failed (%d)\n", tag,
			ret);
		return ret;
	}

	return 0;
}
/*
 * Handler for the unsolicited "backend firmware updated" request sent by
 * the Interface once a previously started backend update finishes.
 *
 * Validates the request against the id reserved by
 * fw_mgmt_backend_fw_update_operation(), records the final status and
 * wakes the ioctl path waiting on fw_mgmt->completion.
 */
static int fw_mgmt_backend_fw_updated_operation(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct fw_mgmt *fw_mgmt = gb_connection_get_data(connection);
	struct gb_fw_mgmt_backend_fw_updated_request *request;

	/* No pending load and validate request ? */
	if (!fw_mgmt->backend_fw_request_id) {
		dev_err(fw_mgmt->parent, "unexpected backend firmware updated request received\n");
		return -ENODEV;
	}

	if (op->request->payload_size != sizeof(*request)) {
		dev_err(fw_mgmt->parent, "illegal size of backend firmware updated request (%zu != %zu)\n",
			op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	/* Invalid request-id ? */
	if (request->request_id != fw_mgmt->backend_fw_request_id) {
		dev_err(fw_mgmt->parent, "invalid request id for backend firmware updated request (%02u != %02u)\n",
			fw_mgmt->backend_fw_request_id, request->request_id);
		return -ENODEV;
	}

	/* The transaction is over, release the id for reuse */
	ida_simple_remove(&fw_mgmt->id_map, fw_mgmt->backend_fw_request_id);
	fw_mgmt->backend_fw_request_id = 0;
	fw_mgmt->backend_fw_status = request->status;

	/* SUCCESS and RETRY are the expected outcomes; anything else is an error */
	if ((fw_mgmt->backend_fw_status != GB_FW_BACKEND_FW_STATUS_SUCCESS) &&
	    (fw_mgmt->backend_fw_status != GB_FW_BACKEND_FW_STATUS_RETRY))
		dev_err(fw_mgmt->parent,
			"failed to load backend firmware: %02x\n",
			fw_mgmt->backend_fw_status);

	/* Wake up the ioctl blocked in fw_mgmt_ioctl() */
	complete(&fw_mgmt->completion);

	return 0;
}
/* Char device fops */
/*
 * open() for the fw-mgmt char device: look up the fw_mgmt instance for
 * this cdev and pin it in the file's private data.
 */
static int fw_mgmt_open(struct inode *inode, struct file *file)
{
	struct fw_mgmt *fw_mgmt = get_fw_mgmt(inode->i_cdev);

	if (!fw_mgmt)
		return -ENODEV;

	/* The reference taken here keeps fw_mgmt alive until release() */
	file->private_data = fw_mgmt;

	return 0;
}
/* release() for the fw-mgmt char device: drop the reference from open() */
static int fw_mgmt_release(struct inode *inode, struct file *file)
{
	put_fw_mgmt(file->private_data);

	return 0;
}
/*
 * Core ioctl dispatcher, called with fw_mgmt->mutex held by
 * fw_mgmt_ioctl_unlocked().
 *
 * Synchronous commands (version queries) copy results straight back to
 * userspace.  The load-and-validate and backend-update commands start an
 * asynchronous exchange and then sleep on fw_mgmt->completion until the
 * corresponding request handler signals it (or the configurable timeout
 * expires).
 */
static int fw_mgmt_ioctl(struct fw_mgmt *fw_mgmt, unsigned int cmd,
			 void __user *buf)
{
	struct fw_mgmt_ioc_get_intf_version intf_fw_info;
	struct fw_mgmt_ioc_get_backend_version backend_fw_info;
	struct fw_mgmt_ioc_intf_load_and_validate intf_load;
	struct fw_mgmt_ioc_backend_fw_update backend_update;
	unsigned int timeout;
	int ret;

	/* Reject any operations after mode-switch has started */
	if (fw_mgmt->mode_switch_started)
		return -EBUSY;

	switch (cmd) {
	case FW_MGMT_IOC_GET_INTF_FW:
		/* Write-only command: no copy_from_user() needed */
		ret = fw_mgmt_interface_fw_version_operation(fw_mgmt,
							     &intf_fw_info);
		if (ret)
			return ret;

		if (copy_to_user(buf, &intf_fw_info, sizeof(intf_fw_info)))
			return -EFAULT;

		return 0;
	case FW_MGMT_IOC_GET_BACKEND_FW:
		/* Read the requested firmware-tag, return status + version */
		if (copy_from_user(&backend_fw_info, buf,
				   sizeof(backend_fw_info)))
			return -EFAULT;

		ret = fw_mgmt_backend_fw_version_operation(fw_mgmt,
							   &backend_fw_info);
		if (ret)
			return ret;

		if (copy_to_user(buf, &backend_fw_info,
				 sizeof(backend_fw_info)))
			return -EFAULT;

		return 0;
	case FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE:
		if (copy_from_user(&intf_load, buf, sizeof(intf_load)))
			return -EFAULT;

		ret = fw_mgmt_load_and_validate_operation(fw_mgmt,
				intf_load.load_method, intf_load.firmware_tag);
		if (ret)
			return ret;

		/* Completed asynchronously by the loaded-fw request handler */
		if (!wait_for_completion_timeout(&fw_mgmt->completion,
						 fw_mgmt->timeout_jiffies)) {
			dev_err(fw_mgmt->parent, "timed out waiting for firmware load and validation to finish\n");
			return -ETIMEDOUT;
		}

		intf_load.status = fw_mgmt->intf_fw_status;
		intf_load.major = fw_mgmt->intf_fw_major;
		intf_load.minor = fw_mgmt->intf_fw_minor;

		if (copy_to_user(buf, &intf_load, sizeof(intf_load)))
			return -EFAULT;

		return 0;
	case FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE:
		if (copy_from_user(&backend_update, buf,
				   sizeof(backend_update)))
			return -EFAULT;

		ret = fw_mgmt_backend_fw_update_operation(fw_mgmt,
				backend_update.firmware_tag);
		if (ret)
			return ret;

		/* Completed asynchronously by the backend-updated handler */
		if (!wait_for_completion_timeout(&fw_mgmt->completion,
						 fw_mgmt->timeout_jiffies)) {
			dev_err(fw_mgmt->parent, "timed out waiting for backend firmware update to finish\n");
			return -ETIMEDOUT;
		}

		backend_update.status = fw_mgmt->backend_fw_status;

		if (copy_to_user(buf, &backend_update, sizeof(backend_update)))
			return -EFAULT;

		return 0;
	case FW_MGMT_IOC_SET_TIMEOUT_MS:
		/* Let userspace tune how long the async waits above block */
		if (get_user(timeout, (unsigned int __user *)buf))
			return -EFAULT;

		if (!timeout) {
			dev_err(fw_mgmt->parent, "timeout can't be zero\n");
			return -EINVAL;
		}

		fw_mgmt->timeout_jiffies = msecs_to_jiffies(timeout);

		return 0;
	case FW_MGMT_IOC_MODE_SWITCH:
		/* Only allowed after a successful load-and-validate */
		if (!fw_mgmt->intf_fw_loaded) {
			dev_err(fw_mgmt->parent,
				"Firmware not loaded for mode-switch\n");
			return -EPERM;
		}

		/*
		 * Disallow new ioctls as the fw-core bundle driver is going to
		 * get disconnected soon and the character device will get
		 * removed.
		 */
		fw_mgmt->mode_switch_started = true;

		ret = gb_interface_request_mode_switch(fw_mgmt->connection->intf);
		if (ret) {
			dev_err(fw_mgmt->parent, "Mode-switch failed: %d\n",
				ret);
			/* Switch didn't start; allow further ioctls again */
			fw_mgmt->mode_switch_started = false;
			return ret;
		}

		return 0;
	default:
		return -ENOTTY;
	}
}
/*
 * unlocked_ioctl entry point: serializes all ioctls per fw_mgmt instance,
 * refuses new work once the connection is being torn down, and brackets
 * the real work with bundle runtime-PM get/put.
 */
static long fw_mgmt_ioctl_unlocked(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	struct fw_mgmt *fw_mgmt = file->private_data;
	struct gb_bundle *bundle = fw_mgmt->connection->bundle;
	int ret = -ENODEV;

	/*
	 * Serialize ioctls.
	 *
	 * We don't want the user to do few operations in parallel. For example,
	 * updating Interface firmware in parallel for the same Interface. There
	 * is no need to do things in parallel for speed and we can avoid having
	 * complicated code for now.
	 *
	 * This is also used to protect ->disabled, which is used to check if
	 * the connection is getting disconnected, so that we don't start any
	 * new operations.
	 */
	mutex_lock(&fw_mgmt->mutex);
	if (!fw_mgmt->disabled) {
		/* Resume the bundle for the duration of the ioctl */
		ret = gb_pm_runtime_get_sync(bundle);
		if (!ret) {
			ret = fw_mgmt_ioctl(fw_mgmt, cmd, (void __user *)arg);
			gb_pm_runtime_put_autosuspend(bundle);
		}
	}
	mutex_unlock(&fw_mgmt->mutex);

	return ret;
}
/* File operations for the gb-fw-mgmt-%d character device */
static const struct file_operations fw_mgmt_fops = {
	.owner = THIS_MODULE,
	.open = fw_mgmt_open,
	.release = fw_mgmt_release,
	.unlocked_ioctl = fw_mgmt_ioctl_unlocked,
};
/*
 * Dispatch unsolicited fw-mgmt requests arriving from the Interface to
 * the matching handler; unknown request types are rejected.
 */
int gb_fw_mgmt_request_handler(struct gb_operation *op)
{
	switch (op->type) {
	case GB_FW_MGMT_TYPE_LOADED_FW:
		return fw_mgmt_interface_fw_loaded_operation(op);
	case GB_FW_MGMT_TYPE_BACKEND_FW_UPDATED:
		return fw_mgmt_backend_fw_updated_operation(op);
	}

	dev_err(&op->connection->bundle->dev,
		"unsupported request: %u\n", op->type);
	return -EINVAL;
}
/*
 * Set up the fw-mgmt state for @connection: allocate and initialize the
 * fw_mgmt structure, add it to the global list, enable the connection,
 * and expose a character device (plus class device node) to userspace.
 *
 * A NULL connection is not an error — the bundle simply has no fw-mgmt
 * cport.  On failure everything is unwound in reverse order and the
 * initial kref is dropped via put_fw_mgmt().
 */
int gb_fw_mgmt_connection_init(struct gb_connection *connection)
{
	struct fw_mgmt *fw_mgmt;
	int ret, minor;

	if (!connection)
		return 0;

	fw_mgmt = kzalloc(sizeof(*fw_mgmt), GFP_KERNEL);
	if (!fw_mgmt)
		return -ENOMEM;

	fw_mgmt->parent = &connection->bundle->dev;
	fw_mgmt->timeout_jiffies = msecs_to_jiffies(FW_MGMT_TIMEOUT_MS);
	fw_mgmt->connection = connection;

	gb_connection_set_data(connection, fw_mgmt);
	init_completion(&fw_mgmt->completion);
	ida_init(&fw_mgmt->id_map);
	mutex_init(&fw_mgmt->mutex);
	/* Initial reference, dropped in gb_fw_mgmt_connection_exit() */
	kref_init(&fw_mgmt->kref);

	mutex_lock(&list_mutex);
	list_add(&fw_mgmt->node, &fw_mgmt_list);
	mutex_unlock(&list_mutex);

	ret = gb_connection_enable(connection);
	if (ret)
		goto err_list_del;

	minor = ida_simple_get(&fw_mgmt_minors_map, 0, NUM_MINORS, GFP_KERNEL);
	if (minor < 0) {
		ret = minor;
		goto err_connection_disable;
	}

	/* Add a char device to allow userspace to interact with fw-mgmt */
	fw_mgmt->dev_num = MKDEV(MAJOR(fw_mgmt_dev_num), minor);
	cdev_init(&fw_mgmt->cdev, &fw_mgmt_fops);

	ret = cdev_add(&fw_mgmt->cdev, fw_mgmt->dev_num, 1);
	if (ret)
		goto err_remove_ida;

	/* Add a soft link to the previously added char-dev within the bundle */
	fw_mgmt->class_device = device_create(fw_mgmt_class, fw_mgmt->parent,
					      fw_mgmt->dev_num, NULL,
					      "gb-fw-mgmt-%d", minor);
	if (IS_ERR(fw_mgmt->class_device)) {
		ret = PTR_ERR(fw_mgmt->class_device);
		goto err_del_cdev;
	}

	return 0;

err_del_cdev:
	cdev_del(&fw_mgmt->cdev);
err_remove_ida:
	ida_simple_remove(&fw_mgmt_minors_map, minor);
err_connection_disable:
	gb_connection_disable(connection);
err_list_del:
	mutex_lock(&list_mutex);
	list_del(&fw_mgmt->node);
	mutex_unlock(&list_mutex);

	put_fw_mgmt(fw_mgmt);

	return ret;
}
/*
 * Tear down the fw-mgmt state for @connection.
 *
 * The ordering matters: first remove the userspace-visible device nodes,
 * then fence off in-flight ioctls via ->disabled, then disable the
 * connection, and finally remove the instance from the global list and
 * drop the initial reference.  The structure itself is freed once the
 * last open file descriptor releases its reference.
 */
void gb_fw_mgmt_connection_exit(struct gb_connection *connection)
{
	struct fw_mgmt *fw_mgmt;

	if (!connection)
		return;

	fw_mgmt = gb_connection_get_data(connection);

	device_destroy(fw_mgmt_class, fw_mgmt->dev_num);
	cdev_del(&fw_mgmt->cdev);
	ida_simple_remove(&fw_mgmt_minors_map, MINOR(fw_mgmt->dev_num));

	/*
	 * Disallow any new ioctl operations on the char device and wait for
	 * existing ones to finish.
	 */
	mutex_lock(&fw_mgmt->mutex);
	fw_mgmt->disabled = true;
	mutex_unlock(&fw_mgmt->mutex);

	/* All pending greybus operations should have finished by now */
	gb_connection_disable(fw_mgmt->connection);

	/* Disallow new users to get access to the fw_mgmt structure */
	mutex_lock(&list_mutex);
	list_del(&fw_mgmt->node);
	mutex_unlock(&list_mutex);

	/*
	 * All current users of fw_mgmt would have taken a reference to it by
	 * now, we can drop our reference and wait the last user will get
	 * fw_mgmt freed.
	 */
	put_fw_mgmt(fw_mgmt);
}
/*
 * Module-wide init: create the gb_fw_mgmt device class and reserve the
 * char-dev region used by all fw-mgmt instances.
 */
int fw_mgmt_init(void)
{
	int ret;

	fw_mgmt_class = class_create(THIS_MODULE, "gb_fw_mgmt");
	if (IS_ERR(fw_mgmt_class))
		return PTR_ERR(fw_mgmt_class);

	ret = alloc_chrdev_region(&fw_mgmt_dev_num, 0, NUM_MINORS,
				  "gb_fw_mgmt");
	if (ret) {
		/* Undo the class creation before bailing out */
		class_destroy(fw_mgmt_class);
		return ret;
	}

	return 0;
}
/*
 * Module-wide cleanup: release the char-dev region, destroy the device
 * class and drop the minor-number ida (all instances are gone by now).
 */
void fw_mgmt_exit(void)
{
	unregister_chrdev_region(fw_mgmt_dev_num, NUM_MINORS);
	class_destroy(fw_mgmt_class);
	ida_destroy(&fw_mgmt_minors_map);
}

View File

@ -0,0 +1,127 @@
/*
* Greybus Camera protocol driver.
*
* Copyright 2015 Google Inc.
*
* Released under the GPLv2 only.
*/
#ifndef __GB_CAMERA_H
#define __GB_CAMERA_H
#include <linux/v4l2-mediabus.h>
/* Input flags need to be set from the caller */
#define GB_CAMERA_IN_FLAG_TEST (1 << 0)
/* Output flags returned */
#define GB_CAMERA_OUT_FLAG_ADJUSTED (1 << 0)
/**
 * struct gb_camera_stream - Represents greybus camera stream.
 * @width: Stream width in pixels.
 * @height: Stream height in pixels.
 * @pixel_code: Media bus pixel code.
 * @vc: MIPI CSI virtual channel.
 * @dt: MIPI CSI data types. Most formats use a single data type, in which case
 *      the second element will be ignored.
 * @max_size: Maximum size of a frame in bytes. The camera module guarantees
 *            that all data between the Frame Start and Frame End packet for
 *            the associated virtual channel and data type(s) will not exceed
 *            this size.
 */
struct gb_camera_stream {
	unsigned int width;
	unsigned int height;
	enum v4l2_mbus_pixelcode pixel_code;
	unsigned int vc;
	unsigned int dt[2];
	unsigned int max_size;
};
/**
 * struct gb_camera_csi_params - CSI configuration parameters
 * @num_lanes: number of CSI data lanes
 * @clk_freq: CSI clock frequency in Hz
 */
struct gb_camera_csi_params {
	unsigned int num_lanes;
	unsigned int clk_freq;
};
/**
 * struct gb_camera_ops - Greybus camera operations, used by the Greybus camera
 * driver to expose operations to the host camera driver.
 * @capabilities: Retrieve camera capabilities and store them in the buffer
 *                'buf' capabilities. The buffer maximum size is specified by
 *                the caller in the 'size' parameter, and the effective
 *                capabilities size is returned from the function. If the buffer
 *                size is too small to hold the capabilities an error is
 *                returned and the buffer is left untouched.
 *
 * @configure_streams: Negotiate configuration and prepare the module for video
 *                     capture. The caller specifies the number of streams it
 *                     requests in the 'nstreams' argument and the associated
 *                     streams configurations in the 'streams' argument. The
 *                     GB_CAMERA_IN_FLAG_TEST 'flag' can be set to test a
 *                     configuration without applying it, otherwise the
 *                     configuration is applied by the module. The module can
 *                     decide to modify the requested configuration, including
 *                     using a different number of streams. In that case the
 *                     modified configuration won't be applied, the
 *                     GB_CAMERA_OUT_FLAG_ADJUSTED 'flag' will be set upon
 *                     return, and the modified configuration and number of
 *                     streams stored in 'streams' and 'nstreams'. The module
 *                     returns its CSI-2 bus parameters in the 'csi_params'
 *                     structure in all cases.
 *
 * @capture: Submit a capture request. The supplied 'request_id' must be unique
 *           and higher than the IDs of all the previously submitted requests.
 *           The 'streams' argument specifies which streams are affected by the
 *           request in the form of a bitmask, with bits corresponding to the
 *           configured streams indexes. If the request contains settings, the
 *           'settings' argument points to the settings buffer and its size is
 *           specified by the 'settings_size' argument. Otherwise the 'settings'
 *           argument should be set to NULL and 'settings_size' to 0.
 *
 * @flush: Flush the capture requests queue. Return the ID of the last request
 *         that will be processed by the device before it stops transmitting
 *         video frames. All queued capture requests with IDs higher than the
 *         returned ID will be dropped without being processed.
 */
struct gb_camera_ops {
	ssize_t (*capabilities)(void *priv, char *buf, size_t len);
	int (*configure_streams)(void *priv, unsigned int *nstreams,
			unsigned int *flags, struct gb_camera_stream *streams,
			struct gb_camera_csi_params *csi_params);
	int (*capture)(void *priv, u32 request_id,
			unsigned int streams, unsigned int num_frames,
			size_t settings_size, const void *settings);
	int (*flush)(void *priv, u32 *request_id);
};
/**
 * struct gb_camera_module - Represents greybus camera module.
 * @priv: Module private data, passed to all camera operations.
 * @ops: Greybus camera operation callbacks.
 * @interface_id: Interface id of the module.
 * @refcount: Reference counting object.
 * @release: Module release function.
 * @list: List entry in the camera modules list.
 */
struct gb_camera_module {
	void *priv;
	const struct gb_camera_ops *ops;

	unsigned int interface_id;
	struct kref refcount;
	void (*release)(struct kref *kref);
	struct list_head list; /* Global list */
};
/*
 * Invoke operation 'op' on module 'f', passing its private data first.
 * Returns -ENODEV for a NULL module and -ENOIOCTLCMD when the module
 * does not implement the operation.
 */
#define gb_camera_call(f, op, args...)      \
	(!(f) ? -ENODEV : (((f)->ops->op) ?  \
	(f)->ops->op((f)->priv, ##args) : -ENOIOCTLCMD))

int gb_camera_register(struct gb_camera_module *module);
int gb_camera_unregister(struct gb_camera_module *module);

#endif /* __GB_CAMERA_H */

View File

@ -0,0 +1,360 @@
/*
* Greybus Bridged-Phy Bus driver
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
#include "greybus.h"
#include "gbphy.h"
#define GB_GBPHY_AUTOSUSPEND_MS 3000
/*
 * struct gbphy_host - per-bundle state for the bridged-phy bus driver.
 * @bundle: the Greybus bundle this host was probed for
 * @devices: list of child gbphy_device instances, one per cport
 */
struct gbphy_host {
	struct gb_bundle *bundle;
	struct list_head devices;
};
static DEFINE_IDA(gbphy_id);
static ssize_t protocol_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
return sprintf(buf, "0x%02x\n", gbphy_dev->cport_desc->protocol_id);
}
static DEVICE_ATTR_RO(protocol_id);

/* Default sysfs attributes attached to every gbphy device */
static struct attribute *gbphy_dev_attrs[] = {
	&dev_attr_protocol_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(gbphy_dev);
static void gbphy_dev_release(struct device *dev)
{
struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
ida_simple_remove(&gbphy_id, gbphy_dev->id);
kfree(gbphy_dev);
}
#ifdef CONFIG_PM
/*
 * Runtime-PM idle callback: instead of suspending immediately, arm the
 * autosuspend timer so back-to-back accesses don't thrash suspend/resume.
 *
 * Note: this must be guarded by CONFIG_PM, not CONFIG_PM_RUNTIME — the
 * latter Kconfig symbol was removed (merged into CONFIG_PM) in v3.19, so
 * an #ifdef on it is always false and SET_RUNTIME_PM_OPS below would then
 * reference an undefined gb_gbphy_idle when CONFIG_PM is enabled.
 */
static int gb_gbphy_idle(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);
	return 0;
}
#endif
/* Generic runtime suspend/resume plus our autosuspend-based idle hook */
static const struct dev_pm_ops gb_gbphy_pm_ops = {
	SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend,
			   pm_generic_runtime_resume,
			   gb_gbphy_idle)
};
/* Device type shared by all gbphy devices; ties in release and PM ops */
static struct device_type greybus_gbphy_dev_type = {
	.name	 =	"gbphy_device",
	.release =	gbphy_dev_release,
	.pm	=	&gb_gbphy_pm_ops,
};
/*
 * Populate the uevent environment with the device's position in the
 * greybus topology (bus/module/interface/bundle) plus its gbphy id and
 * protocol id, so userspace can match drivers and name nodes.
 */
static int gbphy_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
	struct gb_bundle *bundle = gbphy_dev->bundle;
	struct gb_interface *intf = bundle->intf;

	/* add_uevent_var() returns non-zero only on buffer exhaustion */
	if (add_uevent_var(env, "BUS=%u", intf->hd->bus_id) ||
	    add_uevent_var(env, "MODULE=%u", intf->module->module_id) ||
	    add_uevent_var(env, "INTERFACE=%u", intf->interface_id) ||
	    add_uevent_var(env, "GREYBUS_ID=%08x/%08x",
			   intf->vendor_id, intf->product_id) ||
	    add_uevent_var(env, "BUNDLE=%u", bundle->id) ||
	    add_uevent_var(env, "BUNDLE_CLASS=%02x", bundle->class) ||
	    add_uevent_var(env, "GBPHY=%u", gbphy_dev->id) ||
	    add_uevent_var(env, "PROTOCOL_ID=%02x",
			   gbphy_dev->cport_desc->protocol_id))
		return -ENOMEM;

	return 0;
}
/*
 * Scan a driver's (zero-terminated) id table for an entry whose
 * protocol id matches the device's cport; NULL if no table or no match.
 */
static const struct gbphy_device_id *
gbphy_dev_match_id(struct gbphy_device *gbphy_dev,
		   struct gbphy_driver *gbphy_drv)
{
	const struct gbphy_device_id *id;

	for (id = gbphy_drv->id_table; id && id->protocol_id; id++) {
		if (id->protocol_id == gbphy_dev->cport_desc->protocol_id)
			return id;
	}

	return NULL;
}
static int gbphy_dev_match(struct device *dev, struct device_driver *drv)
{
struct gbphy_driver *gbphy_drv = to_gbphy_driver(drv);
struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
const struct gbphy_device_id *id;
id = gbphy_dev_match_id(gbphy_dev, gbphy_drv);
if (id)
return 1;
return 0;
}
/*
 * Bus probe callback: resume the parent bundle, enable runtime PM for the
 * gbphy device (initially active with autosuspend), and hand off to the
 * driver's probe.  On driver failure all runtime-PM setup is unwound.
 */
static int gbphy_dev_probe(struct device *dev)
{
	struct gbphy_driver *gbphy_drv = to_gbphy_driver(dev->driver);
	struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
	const struct gbphy_device_id *id;
	int ret;

	id = gbphy_dev_match_id(gbphy_dev, gbphy_drv);
	if (!id)
		return -ENODEV;

	/* for old kernels we need get_sync to resume parent devices */
	ret = gb_pm_runtime_get_sync(gbphy_dev->bundle);
	if (ret < 0)
		return ret;

	pm_runtime_set_autosuspend_delay(dev, GB_GBPHY_AUTOSUSPEND_MS);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	/*
	 * Drivers should call put on the gbphy dev before returning
	 * from probe if they support runtime pm.
	 */
	ret = gbphy_drv->probe(gbphy_dev, id);
	if (ret) {
		/* Roll back the runtime-PM state set up above */
		pm_runtime_disable(dev);
		pm_runtime_set_suspended(dev);
		pm_runtime_put_noidle(dev);
		pm_runtime_dont_use_autosuspend(dev);
	}

	/* Balance the get_sync on the parent bundle */
	gb_pm_runtime_put_autosuspend(gbphy_dev->bundle);

	return ret;
}
/*
 * Bus remove callback: let the driver clean up first, then tear down the
 * runtime-PM state established in gbphy_dev_probe() (mirror order).
 */
static int gbphy_dev_remove(struct device *dev)
{
	struct gbphy_driver *gbphy_drv = to_gbphy_driver(dev->driver);
	struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);

	gbphy_drv->remove(gbphy_dev);

	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
	pm_runtime_put_noidle(dev);
	pm_runtime_dont_use_autosuspend(dev);

	return 0;
}
/* The "gbphy" virtual bus that bridged-phy protocol drivers bind to */
static struct bus_type gbphy_bus_type = {
	.name =		"gbphy",
	.match =	gbphy_dev_match,
	.probe =	gbphy_dev_probe,
	.remove =	gbphy_dev_remove,
	.uevent =	gbphy_dev_uevent,
};
int gb_gbphy_register_driver(struct gbphy_driver *driver,
struct module *owner, const char *mod_name)
{
int retval;
if (greybus_disabled())
return -ENODEV;
driver->driver.bus = &gbphy_bus_type;
driver->driver.name = driver->name;
driver->driver.owner = owner;
driver->driver.mod_name = mod_name;
retval = driver_register(&driver->driver);
if (retval)
return retval;
pr_info("registered new driver %s\n", driver->name);
return 0;
}
EXPORT_SYMBOL_GPL(gb_gbphy_register_driver);
/* Unregister a bridged-phy protocol driver from the gbphy bus */
void gb_gbphy_deregister_driver(struct gbphy_driver *driver)
{
	driver_unregister(&driver->driver);
}
EXPORT_SYMBOL_GPL(gb_gbphy_deregister_driver);
/*
 * Allocate and register one gbphy child device for @cport_desc.
 *
 * Once device_register() has been called, cleanup on failure must go
 * through put_device() so gbphy_dev_release() frees both the id and the
 * structure; before that point the id/allocation are undone by hand.
 */
static struct gbphy_device *gb_gbphy_create_dev(struct gb_bundle *bundle,
				struct greybus_descriptor_cport *cport_desc)
{
	struct gbphy_device *gbphy_dev;
	int retval;
	int id;

	/* ids start at 1; 0 is the upper bound meaning "no limit" */
	id = ida_simple_get(&gbphy_id, 1, 0, GFP_KERNEL);
	if (id < 0)
		return ERR_PTR(id);

	gbphy_dev = kzalloc(sizeof(*gbphy_dev), GFP_KERNEL);
	if (!gbphy_dev) {
		ida_simple_remove(&gbphy_id, id);
		return ERR_PTR(-ENOMEM);
	}

	gbphy_dev->id = id;
	gbphy_dev->bundle = bundle;
	gbphy_dev->cport_desc = cport_desc;
	gbphy_dev->dev.parent = &bundle->dev;
	gbphy_dev->dev.bus = &gbphy_bus_type;
	gbphy_dev->dev.type = &greybus_gbphy_dev_type;
	gbphy_dev->dev.groups = gbphy_dev_groups;
	/* Inherit DMA capabilities from the parent bundle */
	gbphy_dev->dev.dma_mask = bundle->dev.dma_mask;
	dev_set_name(&gbphy_dev->dev, "gbphy%d", id);

	retval = device_register(&gbphy_dev->dev);
	if (retval) {
		/* Release path handles ida_simple_remove() and kfree() */
		put_device(&gbphy_dev->dev);
		return ERR_PTR(retval);
	}

	return gbphy_dev;
}
/*
 * Bundle disconnect callback (also used as probe's error path): resume
 * the bundle so child removal can talk to the hardware, unregister every
 * child gbphy device, then free the host.
 */
static void gb_gbphy_disconnect(struct gb_bundle *bundle)
{
	struct gbphy_host *gbphy_host = greybus_get_drvdata(bundle);
	struct gbphy_device *gbphy_dev, *temp;
	int ret;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret < 0)
		/* Resume failed; still take a PM reference to keep counts sane */
		gb_pm_runtime_get_noresume(bundle);

	list_for_each_entry_safe(gbphy_dev, temp, &gbphy_host->devices, list) {
		list_del(&gbphy_dev->list);
		device_unregister(&gbphy_dev->dev);
	}

	kfree(gbphy_host);
}
/*
 * Bundle probe callback: create one gbphy child device per cport in the
 * bundle.  If any child fails, gb_gbphy_disconnect() tears down the ones
 * already created.  On success the bundle is allowed to autosuspend.
 */
static int gb_gbphy_probe(struct gb_bundle *bundle,
			  const struct greybus_bundle_id *id)
{
	struct gbphy_host *gbphy_host;
	struct gbphy_device *gbphy_dev;
	int i;

	if (bundle->num_cports == 0)
		return -ENODEV;

	gbphy_host = kzalloc(sizeof(*gbphy_host), GFP_KERNEL);
	if (!gbphy_host)
		return -ENOMEM;

	gbphy_host->bundle = bundle;
	INIT_LIST_HEAD(&gbphy_host->devices);
	greybus_set_drvdata(bundle, gbphy_host);

	/*
	 * Create a bunch of children devices, one per cport, and bind the
	 * bridged phy drivers to them.
	 */
	for (i = 0; i < bundle->num_cports; ++i) {
		gbphy_dev = gb_gbphy_create_dev(bundle, &bundle->cport_desc[i]);
		if (IS_ERR(gbphy_dev)) {
			/* Unwinds the children created so far and the host */
			gb_gbphy_disconnect(bundle);
			return PTR_ERR(gbphy_dev);
		}
		list_add(&gbphy_dev->list, &gbphy_host->devices);
	}

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;
}
/* Bind to any bundle advertising the bridged-phy class */
static const struct greybus_bundle_id gb_gbphy_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_BRIDGED_PHY) },
	{ },
};
MODULE_DEVICE_TABLE(greybus, gb_gbphy_id_table);

/* The greybus bundle driver that instantiates the gbphy child devices */
static struct greybus_driver gb_gbphy_driver = {
	.name		= "gbphy",
	.probe		= gb_gbphy_probe,
	.disconnect	= gb_gbphy_disconnect,
	.id_table	= gb_gbphy_id_table,
};
/*
 * Module init: register the gbphy bus first, then the greybus bundle
 * driver that populates it; unwind the bus registration if the latter
 * fails.
 */
static int __init gbphy_init(void)
{
	int ret;

	ret = bus_register(&gbphy_bus_type);
	if (ret) {
		pr_err("gbphy bus register failed (%d)\n", ret);
		return ret;
	}

	ret = greybus_register(&gb_gbphy_driver);
	if (ret) {
		pr_err("error registering greybus driver\n");
		bus_unregister(&gbphy_bus_type);
		ida_destroy(&gbphy_id);
		return ret;
	}

	return 0;
}
module_init(gbphy_init);
/*
 * Module exit: deregister the bundle driver (removing all children)
 * before tearing down the bus and draining the id allocator.
 */
static void __exit gbphy_exit(void)
{
	greybus_deregister(&gb_gbphy_driver);
	bus_unregister(&gbphy_bus_type);
	ida_destroy(&gbphy_id);
}
module_exit(gbphy_exit);
MODULE_LICENSE("GPL v2");

Some files were not shown because too many files have changed in this diff Show More