I've set up two STM32 boards, one as SPI master, the other as SPI slave.
I write directly to registers without any framework.
Master-to-slave communication works perfectly, but the slave sometimes sends garbage.
I first tried interrupts, but the slave would always send garbage and often receive garbage.
Now I have implemented DMA. This works much better: the slave now always receives correct data. But sending is still an issue.
If the transmission is 3 to 5 bytes long, the data from the slave is correct in about 95% of cases.
If the transmission is longer than 5 bytes, then after the 4th or 5th byte there is just random garbage, although the first 4 bytes are almost always (95%) correct.
The signals are clean; I checked them with an oscilloscope. The data which the master receives shows up properly on MISO. So I guess the slave somehow writes garbage into the SPI DR, or the data register gets corrupted.
I know SPI slaves on non-FPGAs are tricky, but this really is unexpected...
Can anyone point me in a direction? I'm desperate and thankful for any bit of advice.
This is the code:
void DMA1_Stream3_IRQHandler( void )
{
    if (spi2_slave)
    {
        while( (spi_spc->SR & (1<<1)) == 0 );       // must wait for TXE to be set!
        while( spi_spc->SR & (1<<7) );              // must wait for busy to clear!
        DMA1_Stream3->CR &= ~(1<<0);                // Disable stream 3
        while((DMA1_Stream3->CR & (1<<0)) != 0);    // Wait till disabled
        DMA1_Stream3->NDTR = 3;                     // Number of bytes to receive
        DMA1_Stream3->CR |= (1<<0);                 // Enable DMA1_Stream3 (RX)
        DMA1->LIFCR = (1<<27);                      // Clear transfer complete flag in stream 3
        // fire SPI2 finished callback
        if (spi2_xfer_done != 0)
        {
            if (spi2_xfer_len > 0)
            {
                spi2_xfer_done(spi2_rx_buffer, spi2_xfer_len);
            }
        }
    }
    else
    {
        while( spi_spc->SR & (1<<7) );              // must wait for busy to clear!
        GPIOB->ODR |= (1<<12);                      // Pull up SS pin
        spi_spc->CR2 &= ~((1<<0) | (1<<1));         // Disable TX and RX DMA request lines
        spi_spc->CR1 &= ~(1<<6);                    // 6: disable SPI
        DMA1->LIFCR = (1<<27);                      // Clear transfer complete flag in stream 3
        // fire SPI2 finished callback
        if (spi2_xfer_done != 0)
        {
            spi2_xfer_done(spi2_rx_buffer, spi2_xfer_len);
        }
        while( (spi_spc->SR & (1<<1)) == 0 );       // must wait for TXE to be set!
    }
}

// For Slave TX DMA
void DMA1_Stream4_IRQHandler( void )
{
    DMA1_Stream4->CR &= ~(1<<0);                    // Disable stream 4
    while((DMA1_Stream4->CR & (1<<0)) != 0);        // Wait till disabled
    spi_spc->CR2 &= ~(1<<1);                        // Disable TX DMA request line
    DMA1->HIFCR = (1<<5);                           // Clear transfer complete flag in stream 4
}
void mcu_spi_spc_init_slave(void (*xfer_done)(uint8_t* data, uint32_t dlen))
{
    spi2_slave = 1;
    spi2_xfer_done = xfer_done;
    for (int c = 0; c < SPI2_BUFFER_SIZE; c++)
    {
        spi2_tx_buffer[c] = 'X';
        spi2_rx_buffer[c] = 0;
    }
    // Enable the SPI2 peripheral clock
    RCC->APB1ENR |= RCC_APB1ENR_SPI2EN;
    // Enable port B clock
    RCC->AHB1ENR |= (1<<1);
    // Enable DMA1 clock
    RCC->AHB1ENR |= RCC_AHB1ENR_DMA1EN;
    // Reset the SPI2 peripheral to its initial state
    RCC->APB1RSTR |= RCC_APB1RSTR_SPI2RST;
    RCC->APB1RSTR &= ~RCC_APB1RSTR_SPI2RST;
    /*
     * SPC SPI2 SS:   Pin33 PB12
     * SPC SPI2 SCK:  Pin34 PB13
     * SPC SPI2 MISO: Pin35 PB14
     * SPC SPI2 MOSI: Pin36 PB15
     */
    // Configure the SPI2 GPIO pins
    GPIOB->MODER   |= (2<<24) | (2<<26) | (2<<28) | (2<<30);
    GPIOB->PUPDR   |= (2<<26) | (2<<28) | (2<<30);
    GPIOB->OSPEEDR |= (3<<24) | (3<<26) | (3<<28) | (3<<30);   // "very high speed"
    GPIOB->AFR[1]  |= (5<<16) | (5<<20) | (5<<24) | (5<<28);   // Alternate function 5 (SPI2)
    //-------------------------------------------------------
    // Clock phase and polarity = 0
    // CR1 = LSByte to MSByte, MSBit first
    // DFF = 8 bit
    // 6 MHz clock (48 MHz / 8)
    spi_spc->CR1 = (7<<3) | (0<<2) | (0<<1) | (1<<0)    // 0:CPHA, 1:CPOL, 2:MASTER, 3:CLOCK_DIVIDER
                 | (0<<7) | (0<<11);                    // 7:LSB first, 11:DFF (8 bit)
    spi_spc->CR2 = (0<<2) | (1<<1) | (1<<0);            // 2:SSOE, 0:enable RX DMA request, 1:enable TX DMA request
    // DMA config (Stream3: RX p2mem, Stream4: TX mem2p)
    // DMA for RX: Stream 3, Channel 0
    DMA1_Stream3->CR &= ~(1<<0);                        // EN = 0: disable and reset
    while((DMA1_Stream3->CR & (1<<0)) != 0);            // Wait
    DMA1_Stream4->CR &= ~(1<<0);                        // EN = 0: disable and reset
    while((DMA1_Stream4->CR & (1<<0)) != 0);            // Wait
    DMA1->LIFCR = (0x3D<<22);                           // Clear all ISR flags related to stream 3
    DMA1->HIFCR = (0x3D<< 0);                           // Clear all ISR flags related to stream 4
    DMA1_Stream3->PAR  = (uint32_t) (&(spi_spc->DR));   // Peripheral address
    DMA1_Stream3->M0AR = (uint32_t) spi2_rx_buffer;     // Memory address
    DMA1_Stream3->NDTR = 3;                             // Number of bytes to receive
    DMA1_Stream3->FCR &= ~(1<<2);                       // Enable direct mode by clearing bit 2
    DMA1_Stream3->CR = (0<<25) |   // 25: Channel selection (0)
                       (1<<10) |   // 10: Increment memory pointer
                       (0<<9)  |   //  9: Do not increment peripheral pointer
                       (0<<6)  |   //  6: Dir (P -> Mem)
                       (1<<4);     //  4: Transfer-complete interrupt
    // DMA for TX: Stream 4, Channel 0
    DMA1_Stream4->PAR  = (uint32_t) (&(spi_spc->DR));   // Peripheral address
    DMA1_Stream4->M0AR = (uint32_t) spi2_tx_buffer;     // Memory address
    DMA1_Stream4->NDTR = 1;                             // Number of bytes to send (dummy)
    DMA1_Stream4->FCR &= ~(1<<2);                       // Enable direct mode by clearing bit 2
    DMA1_Stream4->CR = (0<<25) |   // 25: Channel selection (0)
                       (1<<10) |   // 10: Increment memory pointer
                       (0<<9)  |   //  9: Do not increment peripheral pointer
                       (1<<6)  |   //  6: Dir (Mem -> P)
                       (1<<4);
    // Set up the NVIC to enable interrupts.
    // Use 4 bits for 'priority' and 0 bits for 'subpriority'.
    NVIC_SetPriorityGrouping( 0 );
    uint32_t pri_encoding = NVIC_EncodePriority( 0, 1, 0 );
    NVIC_SetPriority( DMA1_Stream4_IRQn, pri_encoding );
    NVIC_EnableIRQ( DMA1_Stream4_IRQn );
    NVIC_SetPriority( DMA1_Stream3_IRQn, pri_encoding );
    NVIC_EnableIRQ( DMA1_Stream3_IRQn );
    DMA1_Stream3->CR |= (1<<1);                          // Enable DMA1_Stream3 (RX)
    spi_spc->CR1 |= (1<<6);                              // 6: enable SPI
}
In the future the system will have to send and receive roughly 500 bytes.
So, I solved it. It was a whole bunch of things. Also, my assumption in the question was wrong: my slave did not receive/send valid data.
The signals were shown as clean by the oscilloscope, but the scope itself was adding noise to the lines, which was not visible on the scope. Not measuring the lines helped.
I put 100 Ohm series resistors close to the master pins. That did not work, so out of desperation I put the resistors close to the slave instead, and suddenly I got valid data. (This had been the main culprit all along.)
Following Ashley Miller's comment, I implemented a circular buffer where I always send a fixed length every time, so the slave knows exactly what to expect. This mitigated the occasional errors that could be produced when switching off / resetting the DMA shortly after the transmission.
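For illustration, a fixed-length slave TX with the DMA stream in circular mode can look roughly like this (just a sketch of the idea, not my exact code; FRAME_LEN, the buffer name and the function name are placeholders, and the device header is assumed to be included):
#define FRAME_LEN 16                       // fixed frame length agreed with the master (placeholder)

static uint8_t spi2_tx_buffer[FRAME_LEN];

// Slave TX: DMA1 Stream 4, Channel 0, memory -> peripheral, circular mode.
// With CIRC set, NDTR reloads itself after every frame, so the stream never
// has to be disabled and re-armed right after a transmission.
void spi2_slave_tx_dma_circular(void)
{
    DMA1_Stream4->CR &= ~(1<<0);                    // EN = 0: disable before reconfiguring
    while ((DMA1_Stream4->CR & (1<<0)) != 0);       // wait until really disabled
    DMA1_Stream4->PAR  = (uint32_t) (&(SPI2->DR));  // peripheral address
    DMA1_Stream4->M0AR = (uint32_t) spi2_tx_buffer; // memory address
    DMA1_Stream4->NDTR = FRAME_LEN;                 // one fixed-length frame
    DMA1_Stream4->CR = (0<<25) |   // 25: Channel selection (0)
                       (1<<10) |   // 10: Increment memory pointer
                       (1<<8)  |   //  8: CIRC - circular mode, NDTR reloads automatically
                       (1<<6);     //  6: Dir (Mem -> P)
    DMA1_Stream4->CR |= (1<<0);    //  0: EN - start the stream
    SPI2->CR2 |= (1<<1);           //  1: TXDMAEN - let SPI2 raise TX DMA requests
}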
The UART tricked me as well. When receiving too much data at once (as little as 20 or 30 bytes!), my terminal program glitched and scattered the bytes around, so part of the problem was just that. I'm using GtkTerm, for those who are interested.
Clock mode CPOL = 0 and CPHA = 0 doesn't work at all. I set both master and slave to the same setting and still received only garbage. If I loop the master back to itself (connect MISO to MOSI, i.e. exclude the slave), it works regardless of clock mode.
This seems to stem from a timing issue where the slave has to react too quickly and can't keep up even at the slowest possible speed (approx. 100 kHz). I did not investigate this further.
I hope I could help someone with this.
I'm learning to use DMA on an STM32F446 and tried to send data over USART. The goal is to do some calculations and send the results to a PC via RS232.
Here is my MWE:
#include <stdint.h>
#include <stdio.h>
#include "stm32f446xx.h"

#define BAUDRATE      ( 9600 )
#define USART2_TX_PIN (2)
#define USART2_RX_PIN (3)

int main(void) {
    volatile uint32_t core_clock_hz = 16000000;
    uint16_t uartdiv = core_clock_hz / BAUDRATE;
    uint8_t TX_buffer[2];
    TX_buffer[0] = 48; // this is a "0" in ASCII
    TX_buffer[1] = 49; // this is a "1" in ASCII

    // configure peripheral clocks
    RCC->APB2ENR |= RCC_APB2ENR_SYSCFGEN;       // Enable the SYSCFG peripheral
    RCC->APB1ENR |= ( RCC_APB1ENR_USART2EN );   // Enable peripheral clock: USART2
    RCC->AHB1ENR |= ( RCC_AHB1ENR_GPIOAEN );    // Enable peripheral clock: GPIOA
    RCC->AHB1ENR |= ( RCC_AHB1ENR_DMA1EN );     // Enable peripheral clock: DMA1

    // Configure pins A2 (TX), A3 (RX) for USART2. TX: alternate out push-pull, RX: in floating
    // TX
    GPIOA->MODER   &= ~(0x3 << (USART2_TX_PIN*2));  // reset all bits
    GPIOA->MODER   |=  (0x2 << (USART2_TX_PIN*2));  // 10 = alternate
    GPIOA->OSPEEDR &= ~(0x3 << (USART2_TX_PIN*2));  // reset all bits
    GPIOA->OSPEEDR |=  (0x0 << (USART2_TX_PIN*2));  // 00 = low speed
    GPIOA->OTYPER  &= ~(0x1 << USART2_TX_PIN);      // 0 = push-pull
    GPIOA->PUPDR   &= ~(0x3 << (USART2_TX_PIN*2));  // 00 = no pull-up / pull-down
    // RX
    GPIOA->MODER   &= ~(0x3 << (USART2_RX_PIN*2));  // reset all bits
    GPIOA->MODER   |=  (0x2 << (USART2_RX_PIN*2));  // 10 = alternate
    GPIOA->PUPDR   &= ~(0x3 << (USART2_RX_PIN*2));  // reset all bits
    GPIOA->PUPDR   |=  (0x0 << (USART2_RX_PIN*2));  // 00 = no pull-up / pull-down, 01 = pull-up
    // set alternate pin function AF7 for PA2 and PA3. AFR[0] = AFRL
    GPIOA->AFR[0]  &= ~(0xF << (USART2_TX_PIN*4));  // clear all bits
    GPIOA->AFR[0]  |=  (0x7 << (USART2_TX_PIN*4));  // set AF7
    GPIOA->AFR[0]  &= ~(0xF << (USART2_RX_PIN*4));  // clear all bits
    GPIOA->AFR[0]  |=  (0x7 << (USART2_RX_PIN*4));  // set AF7

    // configure USART baud rate
    USART2->BRR = ( ( ( uartdiv / 16 ) << USART_BRR_DIV_Mantissa_Pos )
                  | ( ( uartdiv % 16 ) << USART_BRR_DIV_Fraction_Pos ) );
    // Enable the USART peripheral
    USART2->CR1 |= ( USART_CR1_RE | USART_CR1_TE | USART_CR1_UE );

    // Main loop
    while ( 1 ) {
        DMA1_Stream6->CR &= ~(DMA_SxCR_EN);         // deactivate DMA stream for configuration
        DMA1_Stream6->CR &= ~(DMA_SxCR_CHSEL);      // clear bits
        DMA1_Stream6->CR |=  (DMA_SxCR_CHSEL_2);    // 100 = channel 4
        DMA1_Stream6->CR &= ~(DMA_SxCR_PL);         // priority 00 = low
        DMA1_Stream6->CR &= ~(DMA_SxCR_PSIZE);      // size 00 = 8 bit
        DMA1_Stream6->CR |=  (DMA_SxCR_MINC);       // increment memory pointer with each DMA transfer
        DMA1_Stream6->CR &= ~(DMA_SxCR_DIR);        // clear bits
        DMA1_Stream6->CR |=  (DMA_SxCR_DIR_0);      // 01 = memory-to-peripheral
        DMA1_Stream6->PAR  = ( uint32_t )&USART2->DR;   // peripheral memory address
        DMA1_Stream6->M0AR = ( uint32_t )&TX_buffer;    // data memory address
        DMA1_Stream6->NDTR = ( uint16_t ) 2;            // number of bytes to transfer
        DMA1->HISR &= ~(DMA_HISR_TCIF6 | DMA_HISR_HTIF6 | DMA_HISR_TEIF6 | DMA_HISR_DMEIF6 | DMA_HISR_FEIF6); // clear DMA flags
        USART2->SR &= ~(USART_SR_TC);               // clear USART transfer complete flag
        DMA1_Stream6->CR |= (DMA_SxCR_EN);          // set EN bit to activate DMA stream
        // does not help: USART2->CR1 |= ( USART_CR1_RE | USART_CR1_TE | USART_CR1_UE ); // Enable the USART peripheral
        USART2->CR3 |= (USART_CR3_DMAT);            // enable USART DMA mode
        // wait for end of transfer
        while ( !(DMA1->HISR && DMA_HISR_TCIF6) ) {}
        while ( !(USART2->SR && USART_SR_TC) ) {}
        //
        // do calculations here, modify TX_buffer for next transfer cycle
        //
    } // while (1)
} // main
The code should send the data in TX_buffer in an endless loop, so I was expecting to receive a sequence of 010101... in the PC's terminal. However, I only get a single 01 and then the transmission stops. Since data is generally being sent, the GPIOs, clocks, etc. seem to be configured correctly.
I guess that after one cycle of the while(1) loop, the DMA or the USART is not reset to a state where it accepts new transfers, but I couldn't figure out what exactly is missing. I already considered missing ISRs and IRQs; many examples on the net use them, but I could not find any functionality in them that is not already in my main loop. Thus, my MWE does not use any interrupts or interrupt routines, and all interrupts are deactivated in the DMA configuration register.
With the DMA in circular mode the endless transmission works, but that does not seem to be the appropriate solution for my scenario of calculate -> send -> calculate -> send -> ...
How do USART and DMA have to be configured in this case to allow multiple subsequent transmissions?
EDIT:
Added a compilable MWE.
Additional information which might be helpful:
There are also no interrupts configured for the USART.
My compiler options are:
CFLAGS += -mcpu=$(MCU_SPEC)
CFLAGS += -mthumb
CFLAGS += -Wall
# (Set error messages to appear on a single line.)
CFLAGS += -fmessage-length=0
CFLAGS += --specs=nosys.specs
CFLAGS += -ffunction-sections
CFLAGS += -fdata-sections
CFLAGS += -lm
# (Custom flags sent to the compiler)
CFLAGS += -D$(ST_MCU_DEF)
CFLAGS += -DVVC_$(MCU_CLASS)
#CFLAGS += -DVVC_$(MCU)
# FPU config
ifeq ($(MCU_CLASS), $(filter $(MCU_CLASS), L4 G4 WB F4))
CFLAGS += -mhard-float
CFLAGS += -mfloat-abi=hard
CFLAGS += -mfpu=fpv4-sp-d16
else
CFLAGS += -msoft-float
CFLAGS += -mfloat-abi=soft
endif
Let's look at the reference manual of the MCU together. You are not clearing the flags of DMA.
DMA1->HISR &= ~(DMA_HISR_TCIF6 | DMA_HISR_HTIF6 | DMA_HISR_TEIF6 | DMA_HISR_DMEIF6 | DMA_HISR_FEIF6); // clear DMA flags
USART2->SR &= ~(USART_SR_TC); // clear USART transfer complete flag
One of these lines works; the other one does nothing, because
the USART SR TC bit is marked rc_w0, while DMA's HISR consists entirely of "r" (read-only) bits. Writing to that register has no effect. You need to use the dedicated flag-clear register of the DMA.
So instead, this should work (the register is write-only):
DMA1->HIFCR = DMA_HIFCR_CTCIF6 | DMA_HIFCR_CHTIF6 | DMA_HIFCR_CTEIF6 | DMA_HIFCR_CDMEIF6 | DMA_HIFCR_CFEIF6; // clear DMA flags
Notice I'm not using |=, because |= means the register has to be read first (x |= y is x = x | y), and this register is not readable. So you prepare the value and write it straight in, without reading anything back.
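Put together, the end-of-transfer housekeeping in your loop can then look something like this (a sketch of the idea; only the flag-clearing line really changes, the rest mirrors what your loop already does):
DMA1_Stream6->CR &= ~(DMA_SxCR_EN);                   // stop the stream before reconfiguring
while (DMA1_Stream6->CR & DMA_SxCR_EN) {}             // wait until it is actually disabled
DMA1->HIFCR = DMA_HIFCR_CTCIF6 | DMA_HIFCR_CHTIF6     // clear the stream 6 flags through the
            | DMA_HIFCR_CTEIF6 | DMA_HIFCR_CDMEIF6    // write-only flag-clear register
            | DMA_HIFCR_CFEIF6;
USART2->SR &= ~(USART_SR_TC);                         // TC is rc_w0, so this clear does work
DMA1_Stream6->NDTR = (uint16_t) 2;                    // reload the transfer count
DMA1_Stream6->CR |= (DMA_SxCR_EN);                    // re-arm the stream for the next transfer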
I'm using the STM32F767ZI, and I'm trying to send test data over the USART peripheral. I've done the same configuration as I always do on any device, but this time it does not output anything. I cannot find the mistake; can someone help?
The clock setup
// Enables TIM8 (Delay), USART1 (STDOUT)
RCC->APB2ENR |= (RCC_APB2ENR_TIM8EN
| RCC_APB2ENR_USART1EN);
// Enables GPIOA, GPIOB, GPIOC, GPIOD, GPIOE, GPIOF, DMA1
RCC->AHB1ENR |= (RCC_AHB1ENR_GPIOAEN
| RCC_AHB1ENR_GPIOBEN
| RCC_AHB1ENR_GPIOCEN
| RCC_AHB1ENR_GPIODEN
| RCC_AHB1ENR_GPIOEEN
| RCC_AHB1ENR_GPIOFEN
| RCC_AHB1ENR_DMA1EN);
The initialization code
// Makes A8 (TX) and A9 (RX) Alternative Function
GPIOA->MODER &= ~(GPIO_MODER_MODER8_Msk
| GPIO_MODER_MODER9_Msk);
GPIOA->MODER |= ((0x2 << GPIO_MODER_MODER8_Pos)
| (0x2 << GPIO_MODER_MODER9_Pos));
// Selects AF7 for both A8 (TX) and A9 (RX).
GPIOA->AFR[1] &= ~(GPIO_AFRH_AFRH0_Msk
| GPIO_AFRH_AFRH1_Msk);
GPIOA->AFR[1] |= ((7 << GPIO_AFRH_AFRH0_Pos)
| (7 << GPIO_AFRH_AFRH1_Pos));
// Selects very high speed for A8 (TX) and A9 (RX)
GPIOA->OSPEEDR &= ~(GPIO_OSPEEDR_OSPEEDR8_Msk
| GPIO_OSPEEDR_OSPEEDR9_Msk);
GPIOA->OSPEEDR |= ((0x3 << GPIO_OSPEEDR_OSPEEDR8_Pos)
| (0x3 << GPIO_OSPEEDR_OSPEEDR9_Pos));
// Calculates and sets the baud rate.
m_USART->BRR = (((2 * clk) + baud) / (2 * baud));
// Configures the USART peripheral further.
m_USART->CR1 = USART_CR1_TE // Transmit Enable
| USART_CR1_RE // Receive Enable
| USART_CR1_UE; // USART Enable (EN)
and the write function:
*reinterpret_cast<uint8_t *>(m_USART->TDR) = c;
while (!(m_USART->ISR & USART_ISR_TC));
The problem you have is that you set the wrong pins: it should be PA9 and PA10.
Your write is also wrong. It should be:
*reinterpret_cast<volatile uint8_t *>(&m_USART->TDR) = c;
or C style:
*(volatile uint8_t *)(&m_USART->TDR) = c;
You are also checking the wrong flag.
TC matters if you want to disable the peripheral after the transmission. Under normal conditions, use the TXE flag instead:
while (!(m_USART->ISR & USART_ISR_TXE));
Your reinterpret_cast is unnecessary and incorrect.
I assume you actually wrote *reinterpret_cast<uint8_t *>(&m_USART->TDR) = c; but that is still wrong.
The person who wrote the standard device header has taken great care to make sure that USARTx->TDR already has the correct type, so I strongly advise you to trust them and not cast it. In this particular case they will have made it volatile and you have not, so it is possible that the compiler thinks it can optimize away a write to something you never read back.
The reason you probably got away with this on other STM32 parts is that their UARTs have a single DR for both transmit and receive, so reading DR for reception made the compiler think it couldn't eliminate the write.
Also, I don't know about this particular part, but many STM32s need an extra cycle between writing to RCC->xxxENR and using the respective peripheral; this is usually done with a read of the same register, e.g.:
RCC->AHB1ENR |= RCC_AHB1ENR_GPIOAEN;
(void)RCC->AHB1ENR;
// now safe to access GPIOA registers
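Putting both answers together, a minimal blocking transmit could look roughly like this (a sketch; the function name is just illustrative, and the pin fix from the first answer still applies):
// Send one byte, then wait until the transmit data register is empty again.
void uart_putc(USART_TypeDef *usart, uint8_t c)
{
    usart->TDR = c;                          // TDR is already volatile in the device header, no cast needed
    while (!(usart->ISR & USART_ISR_TXE));   // wait for TXE, not TC
}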
I'm currently having a problem with the RTC clock (LSE) on an STM32L433.
When Vdd is supplied and the RTC is configured, the clock works as expected.
As soon as I remove Vdd (VBAT is provided by a coin cell), the LSE crystal stops oscillating, hence the RTC problem; the backup registers are preserved, but the time is frozen for the period Vdd is off.
What could be the cause of the problem? Is there a PWR register that I'm missing?
Thanks in advance for any response.
RTC setup code (the register library is libopencm3):
void rtc_setup(time_struct time, date_struct date)
{
    usart_puts("1\n");
    rtc_wkup_set = false;
    /* cause a backup domain reset to select the clock source for RTC */
    RCC_BDCR |= RCC_BDCR_BDRST;
    RCC_BDCR &= ~RCC_BDCR_BDRST;
    pwr_disable_backup_domain_write_protect();
    RCC_BDCR &= ~(RCC_BDCR_RTCSEL_MASK << RCC_BDCR_RTCSEL_SHIFT);
    RCC_BDCR |= RCC_BDCR_RTCEN | (RCC_BDCR_RTCSEL_LSE << RCC_BDCR_RTCSEL_SHIFT)
              | (RCC_BDCR_LSEDRV_SHIFT << RCC_BDCR_LSEDRV_HIGH) | RCC_BDCR_LSEON;
    //TODO: Add timeout for RTC FAIL
    while ((RCC_BDCR & RCC_BDCR_LSERDY) == 0)
        ;
    usart_puts("2\n");
    /* enable RTC */
    rtc_unlock();
    RTC_ISR |= RTC_ISR_INIT;
    while (!(RTC_ISR & RTC_ISR_INITF))
        ;
    RTC_CR &= ~(RTC_CR_FMT);
    RTC_DR = (uint32_t)date;
    RTC_TR = (uint32_t)time;
    RTC_ISR &= ~(RTC_ISR_INIT);
    usart_puts("3\n");
    rtc_lock();
}
The issue was a short on one of the legs of the STM32 due to flux residue.
Issue solved.
I am trying to make a master and a slave STM32F103 (Blue Pill) communicate, but I am having trouble with receiving. When I connect my master to a logic analyser I can see MOSI as in the picture.
Logic Analyser
In the picture, MOSI is sending the letter "Y", but not all clock pulses are the same. (I don't know if this is the reason for the communication failure.)
Here are my schematic and my code, simplified as much as I could.
Master Code:
int i;
RCC->APB2ENR |= 0x00001004;                 // SPI1, GPIOA clock enable
GPIOA->CRL &= 0x00000000;
GPIOA->CRL |= 0xb0b33333;
SPI1->CR1 = SPI_CR1_SSM | SPI_CR1_SSI | SPI_CR1_MSTR | SPI_CR1_BR_2;
SPI1->CR1 |= SPI_CR1_SPE;                   // enable SPI
while(1){
    SPI1->DR = 'A';
    for(int i = 0; i < 400000; i++);
    while( !(SPI1->SR & SPI_SR_TXE) );      // wait until transmit buffer empty
}
and the slave code:
int i;
RCC->APB2ENR |= 0x0000100c;                 // SPI1, GPIOA, GPIOB clock enable
GPIOB->CRH &= 0x00000000;
GPIOB->CRH |= 0x33333333;
GPIOA->CRL &= 0x00000000;
GPIOA->CRL |= 0x4b443333;
GPIOA->CRH &= 0x00000000;
GPIOA->CRH |= 0x33333333;
SPI1->CR1 = SPI_CR1_SSM | SPI_CR1_SSI | SPI_CR1_BR_2;
SPI1->CR1 |= SPI_CR1_SPE;                   // enable SPI
SPI1->CR1 &= ~SPI_CR1_MSTR;                 // disable master
for(int c = 0; c < 5; c++){
    LCD_INIT(cmd[c]);
}
while(1){
    while( !(SPI1->SR & SPI_SR_RXNE) );
    char a = SPI1->DR;
    for (i = 0; i < 400000; i++);
    LCD_DATA(a);
    for (i = 0; i < 400000; i++);
}
}
My Schematic:
Schematic
The problem is that the slave is not receiving any data.
It gets stuck in the loop while( !(SPI1->SR & SPI_SR_RXNE));
First, what are your HCLK and APB2 bus frequencies? If I'm not mistaken, you seem to use fPCLK / 32 for the SPI clock, and your logic analyzer shows a clock of roughly 2 to 3 MHz. If your APB2 frequency is higher than the 72 MHz limit, you may experience clock problems.
In the slave, you use SSM (software slave management) and activate SSI (internal slave select). The name of the SSI bit is misleading: it mimics the level of the physical NSS pin. So with SSI = 1 the slave is not selected, which is probably why it ignores the incoming bytes.
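In other words, with SSM kept in the slave, SSI must stay 0 so the slave sees itself as selected (or drop SSM entirely and wire NSS to the master). Something along these lines (a sketch):
SPI1->CR1 = SPI_CR1_SSM | SPI_CR1_BR_2;   // software slave management, SSI left at 0 => internal NSS low, slave selected
SPI1->CR1 &= ~SPI_CR1_MSTR;               // slave mode
SPI1->CR1 |= SPI_CR1_SPE;                 // enable SPI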
I'm trying to set up an interrupt handler in my driver for the DM6446 GPIO bank 0 interrupt, but request_irq returns -22. I know the interrupt number for GPIO bank 0 from the data sheet, which states it to be 56. Below are the GPIO settings in my code; I want to get an interrupt on GPIO 10.
while((REG_VAL(PTSTAT) & 0x1) != 0);                             // Wait for power state transition to finish
REG_VAL(MDCTL26) = 0x00000203;                                   // Enable GPIO module and EMURSTIE bit as stated in sprue14 for state transition
REG_VAL(PTCMD) = 0x1;                                            // Start power state transition for ALWAYSON
while((REG_VAL(PTSTAT) & 0x1) != 0);                             // Wait for power state transition to finish
REG_VAL(PINMUX0) = REG_VAL(PINMUX0) & 0x80000000;                // Disable other functionality on BANK 0 pins
printk(KERN_DEBUG "I2C: PINMUX0 = %x\n", REG_VAL(PINMUX0));
REG_VAL(DIR01) = REG_VAL(DIR01) | 0xFFFFFFFF;                    // Set direction as input for GPIO 0 and 10
REG_VAL(BINTEN) = REG_VAL(BINTEN) | 0x00000001;                  // Enable interrupt for GPIO bank 0
REG_VAL(SET_RIS_TRIG01) = REG_VAL(SET_RIS_TRIG01) | 0x00000401;  // Enable rising edge interrupt for GPIO bank 0, pins 0 and 10
REG_VAL(CLR_FAL_TRIG01) = REG_VAL(CLR_FAL_TRIG01) | 0x00000401;  // Disable falling edge interrupt for bank 0
Result = request_irq(56, Gpio_Interrupt_Handler, 0, "gpio", I2C_MAJOR);
if(Result < 0)
{
printk(KERN_ALERT "UNABLE TO REQUEST GPIO IRQ %d ",Result);
}
A little help would be appreciated.
Thank you.
I have also tried gpio_to_irq for pin 10 of bank 0, but it returns IRQ number 72, while the DM6446 data sheet only lists interrupt numbers up to 63.
I got it. If I use gpio_to_irq, it returns a valid IRQ number, but one that differs from the interrupt number (which I guess is also called an IRQ number) specified in the processor's data sheet. If I look at /proc/interrupts, there is an entry for the IRQ returned from gpio_to_irq, but listed under the GPIO type rather than the processor's interrupt controller, which for my ARM part is the AINTC. All other interrupts are of AINTC type.
Moreover, even if request_irq succeeds with the interrupt number stated in the data sheet, /proc/stat reports interrupts at both IRQ numbers, i.e. the AINTC one and the GPIO one.
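For reference, the pattern that ended up working is roughly this (a sketch; the handler and label names are placeholders):
#include <linux/gpio.h>
#include <linux/interrupt.h>

#define GPIO_PIN 10                                   /* bank 0, pin 10 */

static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
{
    /* handle the rising-edge event here */
    return IRQ_HANDLED;
}

static int gpio_irq_setup(void)
{
    int irq, ret;

    ret = gpio_request(GPIO_PIN, "gpio10");
    if (ret)
        return ret;
    gpio_direction_input(GPIO_PIN);

    irq = gpio_to_irq(GPIO_PIN);   /* virtual IRQ number, not the AINTC event number from the data sheet */
    ret = request_irq(irq, gpio_irq_handler, IRQF_TRIGGER_RISING, "gpio10", NULL);
    return ret;
}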