import scrapy


class CsProductSpider(scrapy.Spider):
    """Spider for the CITIC Securities product-disclosure listings.

    Crawls the paginated product index at www.cs.ecitic.com and yields
    one item per product row, following pagination links (or generating
    page URLs directly when no pagination markup is present).
    """

    name = "cs_product"
    allowed_domains = ["www.cs.ecitic.com"]
    start_urls = ["http://www.cs.ecitic.com/newsite/cpzx/jrcpxxgs/zgcp/index.html"]

    # Fallback page count, used only when the page renders no pagination
    # links. The site showed 103 pages at the time of writing — TODO: confirm
    # periodically, or derive it from the page footer if possible.
    total_pages = 103

    def parse(self, response):
        """Parse one listing page: yield product items, then follow pages.

        Args:
            response: scrapy ``Response`` for one listing page.

        Yields:
            dict: one item per product row (keys are the site's Chinese
                field names), plus ``Request`` objects for further pages.
        """
        # One <li> per product row; column classes th1..th4 hold the fields.
        for row in response.css(".list-con li"):
            yield {
                "产品名称": row.css(".th1::text").get(),
                "管理人": row.css(".th2::text").get(),
                "风险评级": row.css(".th3::text").get(),
                "认购金额起点": row.css(".th4::text").get(),
                # Detail link is not extracted yet — TODO: pull the row's
                # <a href> once the list markup is confirmed.
                "公示信息详情链接": None,
            }

        # Prefer explicit pagination links when the page renders them;
        # keep only paging hrefs (they contain "index_"), dropping
        # prev/next/anchor links.
        page_links = [
            href
            for href in response.css(".pagination li a::attr(href)").getall()
            if "index_" in href
        ]
        for href in page_links:
            yield response.follow(href, callback=self.parse)

        # Fallback: no pagination markup found — generate page URLs directly.
        # Guarded to the start page only, so deeper pages don't each re-emit
        # the full URL list (Scrapy's dupefilter would drop the duplicates,
        # but generating them at all is wasted work).
        if not page_links and response.url in self.start_urls:
            for page_num in range(2, self.total_pages + 1):
                # Site convention: page N lives at index_{N-1}.html
                # (index.html is page 1, index_1.html is page 2, ...).
                yield response.follow(
                    "http://www.cs.ecitic.com/newsite/cpzx/jrcpxxgs/zgcp/"
                    f"index_{page_num - 1}.html",
                    callback=self.parse,
                )